// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

/*
 * Driver functions:
 */

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;

	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_7:
		*value = gpu->identity.minor_features6;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_8:
		*value = gpu->identity.minor_features7;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_9:
		*value = gpu->identity.minor_features8;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_10:
		*value = gpu->identity.minor_features9;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_11:
		*value = gpu->identity.minor_features10;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_12:
		*value = gpu->identity.minor_features11;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
		if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
			*value = ETNAVIV_SOFTPIN_START_ADDRESS;
		else
			*value = ~0ULL;
		break;

	case ETNAVIV_PARAM_GPU_PRODUCT_ID:
		*value = gpu->identity.product_id;
		break;

	case ETNAVIV_PARAM_GPU_CUSTOMER_ID:
		*value = gpu->identity.customer_id;
		break;

	case ETNAVIV_PARAM_GPU_ECO_ID:
		*value = gpu->identity.eco_id;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}


#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)
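
/*
 * Example expansion for reference: etnaviv_field(specs[0],
 * VIVS_HI_CHIP_SPECS_STREAM_COUNT) masks specs[0] with
 * VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK and shifts the result right by
 * VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT, i.e. it extracts the STREAM_COUNT
 * bitfield from the VIVS_HI_CHIP_SPECS register value.
 */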

static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model    = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
		gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);

		/*
		 * Reading these two registers on GC600 rev 0x19 results in an
		 * unhandled fault: external abort on non-linefetch
		 */
		if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
			gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
			gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
		}

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}

		if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
			gpu->identity.eco_id = 1;

		if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
			gpu->identity.eco_id = 1;
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	/*
	 * If there is a match in the HWDB, we aren't interested in the
	 * remaining register values, as they might be wrong.
	 */
	if (etnaviv_fill_identity_from_hwdb(gpu))
		return;

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	/* These models/revisions don't have the 2D pipe bit */
	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision <= 2) ||
	    gpu->identity.model == chipModel_GC300)
		gpu->identity.features |= chipFeatures_PIPE_2D;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600/300 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600 ||
	    gpu->identity.model == chipModel_GC300)
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;

	etnaviv_hw_specs(gpu);
}

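/*
 * Load a new clock control value: the FSCALE_CMD_LOAD bit is pulsed (written
 * together with the new value, then cleared again) so the frequency scaling
 * value takes effect.
 */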
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, clock);
	}

	/*
	 * Choose number of wait cycles to target a ~30us (1/32768) max latency
	 * until new work is picked up by the FE when it polls in the idle loop.
	 * If the GPU base frequency is unknown use 200 wait cycles.
	 */
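	/*
	 * Purely illustrative numbers: with an 800 MHz core clock and
	 * freq_scale 0 the expression below yields 800000000 >> 15 = 24414
	 * wait cycles, well inside the [200, 0xffff] clamp range.
	 */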
	gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
				   200UL, 0xffffUL);
}

static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		/* enable clock */
		unsigned int fscale = 1 << (6 - gpu->freq_scale);

		control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, control);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		if (gpu->sec_mode == ETNA_SEC_KERNEL) {
			gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
				  VIVS_MMUv2_AHB_CONTROL_RESET);
		} else {
			/* set soft reset. */
			control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
			gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
		}

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		/* disable debug registers, as they are not normally needed */
		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	gpu->state = ETNA_GPU_STATE_RESET;
	gpu->exec_state = -1;
	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = NULL;

	return 0;
}

static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
	    etnaviv_is_model_rev(gpu, GC7000, 0x6203))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	/* Disable SE and RA clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC7000, 0x6202))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE |
		       VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write_power(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}

void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
	}
}

static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
					  struct etnaviv_iommu_context *context)
{
	u16 prefetch;
	u32 address;

	WARN_ON(gpu->state != ETNA_GPU_STATE_INITIALIZED);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu, context);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);
	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping);

	etnaviv_gpu_start_fe(gpu, address, prefetch);

	gpu->state = ETNA_GPU_STATE_RUNNING;
}

static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	WARN_ON(!(gpu->state == ETNA_GPU_STATE_IDENTIFIED ||
		  gpu->state == ETNA_GPU_STATE_RESET));

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);

		val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
		gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);

	gpu->state = ETNA_GPU_STATE_INITIALIZED;
}

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	dma_addr_t cmdbuf_paddr;
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		goto pm_put;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	if (gpu->identity.nn_core_count > 0)
		dev_warn(gpu->dev, "etnaviv has been instantiated on a NPU, "
				   "for which the UAPI is still experimental\n");

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * On cores with security features supported, we claim control over the
	 * security states.
	 */
	if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
	    (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
		gpu->sec_mode = ETNA_SEC_KERNEL;

	gpu->state = ETNA_GPU_STATE_IDENTIFIED;

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	ret = etnaviv_iommu_global_init(gpu);
	if (ret)
		goto fail;

	/*
	 * If the GPU is part of a system with DMA addressing limitations,
	 * request pages for our SHM backend buffers from the DMA32 zone to
	 * hopefully avoid performance killing SWIOTLB bounce buffering.
	 */
	if (dma_addressing_limited(gpu->dev))
		priv->shm_gfp_mask |= GFP_DMA32;

	/* Create buffer: */
	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
				  PAGE_SIZE);
	if (ret) {
		dev_err(gpu->dev, "could not create command buffer\n");
		goto fail;
	}

	/*
	 * Set the GPU linear window to cover the cmdbuf region, as the GPU
	 * won't be able to start execution otherwise. The alignment to 128M is
	 * chosen arbitrarily but helps in debugging, as the MMU offset
	 * calculations are much more straightforward this way.
	 *
	 * On MC1.0 cores the linear window offset is ignored by the TS engine,
	 * leading to inconsistent memory views. Avoid using the offset on those
	 * cores if possible, otherwise disable the TS feature.
	 */
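	/*
	 * Illustrative example (numbers not from the source): a command
	 * buffer at physical address 0x4a001000 gives a 128M-aligned base of
	 * 0x48000000 below.
	 */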
	cmdbuf_paddr = ALIGN_DOWN(etnaviv_cmdbuf_get_pa(&gpu->buffer), SZ_128M);

	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		if (cmdbuf_paddr >= SZ_2G)
			priv->mmu_global->memory_base = SZ_2G;
		else
			priv->mmu_global->memory_base = cmdbuf_paddr;
	} else if (cmdbuf_paddr + SZ_128M >= SZ_2G) {
		dev_info(gpu->dev,
			 "Need to move linear window on MC1.0, disabling TS\n");
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
		priv->mmu_global->memory_base = SZ_2G;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
		complete(&gpu->event_free);

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

fail:
	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

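	/*
	 * Sample the FE DMA address and state up to 500 times; if either
	 * value changes between two reads, the DMA engine is still making
	 * progress and we can stop polling.
	 */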
902a8c21a54SThe etnaviv authors 	for (i = 0; i < 500; i++) {
903a8c21a54SThe etnaviv authors 		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
904a8c21a54SThe etnaviv authors 		debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
905a8c21a54SThe etnaviv authors 
906a8c21a54SThe etnaviv authors 		if (debug->address[0] != debug->address[1])
907a8c21a54SThe etnaviv authors 			break;
908a8c21a54SThe etnaviv authors 
909a8c21a54SThe etnaviv authors 		if (debug->state[0] != debug->state[1])
910a8c21a54SThe etnaviv authors 			break;
911a8c21a54SThe etnaviv authors 	}
912a8c21a54SThe etnaviv authors }
913a8c21a54SThe etnaviv authors 
etnaviv_gpu_debugfs(struct etnaviv_gpu * gpu,struct seq_file * m)914a8c21a54SThe etnaviv authors int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
915a8c21a54SThe etnaviv authors {
916a8c21a54SThe etnaviv authors 	struct dma_debug debug;
917a8c21a54SThe etnaviv authors 	u32 dma_lo, dma_hi, axi, idle;
918a8c21a54SThe etnaviv authors 	int ret;
919a8c21a54SThe etnaviv authors 
920a8c21a54SThe etnaviv authors 	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
921a8c21a54SThe etnaviv authors 
922a8c21a54SThe etnaviv authors 	ret = pm_runtime_get_sync(gpu->dev);
923a8c21a54SThe etnaviv authors 	if (ret < 0)
924c5d5a32eSNavid Emamdoost 		goto pm_put;
925a8c21a54SThe etnaviv authors 
926a8c21a54SThe etnaviv authors 	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
927a8c21a54SThe etnaviv authors 	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
928a8c21a54SThe etnaviv authors 	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
929a8c21a54SThe etnaviv authors 	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
930a8c21a54SThe etnaviv authors 
931a8c21a54SThe etnaviv authors 	verify_dma(gpu, &debug);
932a8c21a54SThe etnaviv authors 
93300080663SChristian Gmeiner 	seq_puts(m, "\tidentity\n");
93400080663SChristian Gmeiner 	seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
93500080663SChristian Gmeiner 	seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
93600080663SChristian Gmeiner 	seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
93700080663SChristian Gmeiner 	seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
93800080663SChristian Gmeiner 	seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
93900080663SChristian Gmeiner 
940a8c21a54SThe etnaviv authors 	seq_puts(m, "\tfeatures\n");
9413d9fc642SLucas Stach 	seq_printf(m, "\t major_features: 0x%08x\n",
9423d9fc642SLucas Stach 		   gpu->identity.features);
943a8c21a54SThe etnaviv authors 	seq_printf(m, "\t minor_features0: 0x%08x\n",
944a8c21a54SThe etnaviv authors 		   gpu->identity.minor_features0);
945a8c21a54SThe etnaviv authors 	seq_printf(m, "\t minor_features1: 0x%08x\n",
946a8c21a54SThe etnaviv authors 		   gpu->identity.minor_features1);
947a8c21a54SThe etnaviv authors 	seq_printf(m, "\t minor_features2: 0x%08x\n",
948a8c21a54SThe etnaviv authors 		   gpu->identity.minor_features2);
949a8c21a54SThe etnaviv authors 	seq_printf(m, "\t minor_features3: 0x%08x\n",
950a8c21a54SThe etnaviv authors 		   gpu->identity.minor_features3);
951602eb489SRussell King 	seq_printf(m, "\t minor_features4: 0x%08x\n",
952602eb489SRussell King 		   gpu->identity.minor_features4);
953602eb489SRussell King 	seq_printf(m, "\t minor_features5: 0x%08x\n",
954602eb489SRussell King 		   gpu->identity.minor_features5);
9550538aaf9SLucas Stach 	seq_printf(m, "\t minor_features6: 0x%08x\n",
9560538aaf9SLucas Stach 		   gpu->identity.minor_features6);
9570538aaf9SLucas Stach 	seq_printf(m, "\t minor_features7: 0x%08x\n",
9580538aaf9SLucas Stach 		   gpu->identity.minor_features7);
9590538aaf9SLucas Stach 	seq_printf(m, "\t minor_features8: 0x%08x\n",
9600538aaf9SLucas Stach 		   gpu->identity.minor_features8);
9610538aaf9SLucas Stach 	seq_printf(m, "\t minor_features9: 0x%08x\n",
9620538aaf9SLucas Stach 		   gpu->identity.minor_features9);
9630538aaf9SLucas Stach 	seq_printf(m, "\t minor_features10: 0x%08x\n",
9640538aaf9SLucas Stach 		   gpu->identity.minor_features10);
9650538aaf9SLucas Stach 	seq_printf(m, "\t minor_features11: 0x%08x\n",
9660538aaf9SLucas Stach 		   gpu->identity.minor_features11);
967a8c21a54SThe etnaviv authors 
968a8c21a54SThe etnaviv authors 	seq_puts(m, "\tspecs\n");
969a8c21a54SThe etnaviv authors 	seq_printf(m, "\t stream_count:  %d\n",
970a8c21a54SThe etnaviv authors 			gpu->identity.stream_count);
971a8c21a54SThe etnaviv authors 	seq_printf(m, "\t register_max: %d\n",
972a8c21a54SThe etnaviv authors 			gpu->identity.register_max);
973a8c21a54SThe etnaviv authors 	seq_printf(m, "\t thread_count: %d\n",
974a8c21a54SThe etnaviv authors 			gpu->identity.thread_count);
975a8c21a54SThe etnaviv authors 	seq_printf(m, "\t vertex_cache_size: %d\n",
976a8c21a54SThe etnaviv authors 			gpu->identity.vertex_cache_size);
977a8c21a54SThe etnaviv authors 	seq_printf(m, "\t shader_core_count: %d\n",
978a8c21a54SThe etnaviv authors 			gpu->identity.shader_core_count);
9794c22c61eSLucas Stach 	seq_printf(m, "\t nn_core_count: %d\n",
9804c22c61eSLucas Stach 			gpu->identity.nn_core_count);
981a8c21a54SThe etnaviv authors 	seq_printf(m, "\t pixel_pipes: %d\n",
982a8c21a54SThe etnaviv authors 			gpu->identity.pixel_pipes);
983a8c21a54SThe etnaviv authors 	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
984a8c21a54SThe etnaviv authors 			gpu->identity.vertex_output_buffer_size);
985a8c21a54SThe etnaviv authors 	seq_printf(m, "\t buffer_size: %d\n",
986a8c21a54SThe etnaviv authors 			gpu->identity.buffer_size);
987a8c21a54SThe etnaviv authors 	seq_printf(m, "\t instruction_count: %d\n",
988a8c21a54SThe etnaviv authors 			gpu->identity.instruction_count);
989a8c21a54SThe etnaviv authors 	seq_printf(m, "\t num_constants: %d\n",
990a8c21a54SThe etnaviv authors 			gpu->identity.num_constants);
991602eb489SRussell King 	seq_printf(m, "\t varyings_count: %d\n",
992602eb489SRussell King 			gpu->identity.varyings_count);
993a8c21a54SThe etnaviv authors 
994a8c21a54SThe etnaviv authors 	seq_printf(m, "\taxi: 0x%08x\n", axi);
995a8c21a54SThe etnaviv authors 	seq_printf(m, "\tidle: 0x%08x\n", idle);
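	/*
	 * Force the bits of units not present on this core to "idle" so they
	 * are not reported below; AXI_LP is left as read since it is reported
	 * separately (set means the AXI is in low power mode).
	 */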
996a8c21a54SThe etnaviv authors 	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
997a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
998a8c21a54SThe etnaviv authors 		seq_puts(m, "\t FE is not idle\n");
999a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
1000a8c21a54SThe etnaviv authors 		seq_puts(m, "\t DE is not idle\n");
1001a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
1002a8c21a54SThe etnaviv authors 		seq_puts(m, "\t PE is not idle\n");
1003a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
1004a8c21a54SThe etnaviv authors 		seq_puts(m, "\t SH is not idle\n");
1005a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
1006a8c21a54SThe etnaviv authors 		seq_puts(m, "\t PA is not idle\n");
1007a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
1008a8c21a54SThe etnaviv authors 		seq_puts(m, "\t SE is not idle\n");
1009a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
1010a8c21a54SThe etnaviv authors 		seq_puts(m, "\t RA is not idle\n");
1011a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
1012a8c21a54SThe etnaviv authors 		seq_puts(m, "\t TX is not idle\n");
1013a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
1014a8c21a54SThe etnaviv authors 		seq_puts(m, "\t VG is not idle\n");
1015a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
1016a8c21a54SThe etnaviv authors 		seq_puts(m, "\t IM is not idle\n");
1017a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
1018a8c21a54SThe etnaviv authors 		seq_puts(m, "\t FP is not idle\n");
1019a8c21a54SThe etnaviv authors 	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
1020a8c21a54SThe etnaviv authors 		seq_puts(m, "\t TS is not idle\n");
1021b1704551SGuido Günther 	if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
1022b1704551SGuido Günther 		seq_puts(m, "\t BL is not idle\n");
1023b1704551SGuido Günther 	if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
1024b1704551SGuido Günther 		seq_puts(m, "\t ASYNCFE is not idle\n");
1025b1704551SGuido Günther 	if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
1026b1704551SGuido Günther 		seq_puts(m, "\t MC is not idle\n");
1027b1704551SGuido Günther 	if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
1028b1704551SGuido Günther 		seq_puts(m, "\t PPA is not idle\n");
1029b1704551SGuido Günther 	if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
1030b1704551SGuido Günther 		seq_puts(m, "\t WD is not idle\n");
1031b1704551SGuido Günther 	if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
1032b1704551SGuido Günther 		seq_puts(m, "\t NN is not idle\n");
1033b1704551SGuido Günther 	if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
1034b1704551SGuido Günther 		seq_puts(m, "\t TP is not idle\n");
1035a8c21a54SThe etnaviv authors 	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
1036a8c21a54SThe etnaviv authors 		seq_puts(m, "\t AXI low power mode\n");
1037a8c21a54SThe etnaviv authors 
1038a8c21a54SThe etnaviv authors 	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
1039a8c21a54SThe etnaviv authors 		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
1040a8c21a54SThe etnaviv authors 		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
1041a8c21a54SThe etnaviv authors 		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
1042a8c21a54SThe etnaviv authors 
1043a8c21a54SThe etnaviv authors 		seq_puts(m, "\tMC\n");
1044a8c21a54SThe etnaviv authors 		seq_printf(m, "\t read0: 0x%08x\n", read0);
1045a8c21a54SThe etnaviv authors 		seq_printf(m, "\t read1: 0x%08x\n", read1);
1046a8c21a54SThe etnaviv authors 		seq_printf(m, "\t write: 0x%08x\n", write);
1047a8c21a54SThe etnaviv authors 	}
1048a8c21a54SThe etnaviv authors 
1049a8c21a54SThe etnaviv authors 	seq_puts(m, "\tDMA ");
1050a8c21a54SThe etnaviv authors 
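	/*
	 * verify_dma() sampled the FE DMA address and state twice; if neither
	 * changed between the samples the frontend is most likely stuck.
	 */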
1051a8c21a54SThe etnaviv authors 	if (debug.address[0] == debug.address[1] &&
1052a8c21a54SThe etnaviv authors 	    debug.state[0] == debug.state[1]) {
1053a8c21a54SThe etnaviv authors 		seq_puts(m, "seems to be stuck\n");
1054a8c21a54SThe etnaviv authors 	} else if (debug.address[0] == debug.address[1]) {
1055c01e0159SMasanari Iida 		seq_puts(m, "address is constant\n");
1056a8c21a54SThe etnaviv authors 	} else {
1057c01e0159SMasanari Iida 		seq_puts(m, "is running\n");
1058a8c21a54SThe etnaviv authors 	}
1059a8c21a54SThe etnaviv authors 
1060a8c21a54SThe etnaviv authors 	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
1061a8c21a54SThe etnaviv authors 	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
1062a8c21a54SThe etnaviv authors 	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
1063a8c21a54SThe etnaviv authors 	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
1064a8c21a54SThe etnaviv authors 	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
1065a8c21a54SThe etnaviv authors 		   dma_lo, dma_hi);
1066a8c21a54SThe etnaviv authors 
1067a8c21a54SThe etnaviv authors 	ret = 0;
1068a8c21a54SThe etnaviv authors 
1069a8c21a54SThe etnaviv authors 	pm_runtime_mark_last_busy(gpu->dev);
1070c5d5a32eSNavid Emamdoost pm_put:
1071a8c21a54SThe etnaviv authors 	pm_runtime_put_autosuspend(gpu->dev);
1072a8c21a54SThe etnaviv authors 
1073a8c21a54SThe etnaviv authors 	return ret;
1074a8c21a54SThe etnaviv authors }
1075a8c21a54SThe etnaviv authors #endif
1076a8c21a54SThe etnaviv authors 
1077a8c21a54SThe etnaviv authors /* fence object management */
1078a8c21a54SThe etnaviv authors struct etnaviv_fence {
1079a8c21a54SThe etnaviv authors 	struct etnaviv_gpu *gpu;
1080f54d1867SChris Wilson 	struct dma_fence base;
1081a8c21a54SThe etnaviv authors };
1082a8c21a54SThe etnaviv authors 
1083f54d1867SChris Wilson static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
1084a8c21a54SThe etnaviv authors {
1085a8c21a54SThe etnaviv authors 	return container_of(fence, struct etnaviv_fence, base);
1086a8c21a54SThe etnaviv authors }
1087a8c21a54SThe etnaviv authors 
1088f54d1867SChris Wilson static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
1089a8c21a54SThe etnaviv authors {
1090a8c21a54SThe etnaviv authors 	return "etnaviv";
1091a8c21a54SThe etnaviv authors }
1092a8c21a54SThe etnaviv authors 
1093f54d1867SChris Wilson static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
1094a8c21a54SThe etnaviv authors {
1095a8c21a54SThe etnaviv authors 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
1096a8c21a54SThe etnaviv authors 
1097a8c21a54SThe etnaviv authors 	return dev_name(f->gpu->dev);
1098a8c21a54SThe etnaviv authors }
1099a8c21a54SThe etnaviv authors 
1100f54d1867SChris Wilson static bool etnaviv_fence_signaled(struct dma_fence *fence)
1101a8c21a54SThe etnaviv authors {
1102a8c21a54SThe etnaviv authors 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
1103a8c21a54SThe etnaviv authors 
11043283ee77SLucas Stach 	return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
1105a8c21a54SThe etnaviv authors }
1106a8c21a54SThe etnaviv authors 
1107f54d1867SChris Wilson static void etnaviv_fence_release(struct dma_fence *fence)
1108a8c21a54SThe etnaviv authors {
1109a8c21a54SThe etnaviv authors 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
1110a8c21a54SThe etnaviv authors 
1111a8c21a54SThe etnaviv authors 	kfree_rcu(f, base.rcu);
1112a8c21a54SThe etnaviv authors }
1113a8c21a54SThe etnaviv authors 
1114f54d1867SChris Wilson static const struct dma_fence_ops etnaviv_fence_ops = {
1115a8c21a54SThe etnaviv authors 	.get_driver_name = etnaviv_fence_get_driver_name,
1116a8c21a54SThe etnaviv authors 	.get_timeline_name = etnaviv_fence_get_timeline_name,
1117a8c21a54SThe etnaviv authors 	.signaled = etnaviv_fence_signaled,
1118a8c21a54SThe etnaviv authors 	.release = etnaviv_fence_release,
1119a8c21a54SThe etnaviv authors };
1120a8c21a54SThe etnaviv authors 
1121f54d1867SChris Wilson static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1122a8c21a54SThe etnaviv authors {
1123a8c21a54SThe etnaviv authors 	struct etnaviv_fence *f;
1124a8c21a54SThe etnaviv authors 
1125b27734c2SLucas Stach 	/*
1126b27734c2SLucas Stach 	 * GPU lock must already be held, otherwise fence completion order might
1127b27734c2SLucas Stach 	 * not match the seqno order assigned here.
1128b27734c2SLucas Stach 	 */
1129b27734c2SLucas Stach 	lockdep_assert_held(&gpu->lock);
1130b27734c2SLucas Stach 
1131a8c21a54SThe etnaviv authors 	f = kzalloc(sizeof(*f), GFP_KERNEL);
1132a8c21a54SThe etnaviv authors 	if (!f)
1133a8c21a54SThe etnaviv authors 		return NULL;
1134a8c21a54SThe etnaviv authors 
1135a8c21a54SThe etnaviv authors 	f->gpu = gpu;
1136a8c21a54SThe etnaviv authors 
1137f54d1867SChris Wilson 	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1138a8c21a54SThe etnaviv authors 		       gpu->fence_context, ++gpu->next_fence);
1139a8c21a54SThe etnaviv authors 
1140a8c21a54SThe etnaviv authors 	return &f->base;
1141a8c21a54SThe etnaviv authors }
1142a8c21a54SThe etnaviv authors 
11433283ee77SLucas Stach /* returns true if fence a comes after fence b */
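/*
 * The unsigned subtraction wraps modulo 2^32, so the comparison stays
 * correct across seqno wrap-around: e.g. a = 0x00000002, b = 0xfffffffe
 * yields (s32)(a - b) == 4 > 0, ordering a after b.
 */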
11443283ee77SLucas Stach static inline bool fence_after(u32 a, u32 b)
11453283ee77SLucas Stach {
11463283ee77SLucas Stach 	return (s32)(a - b) > 0;
11473283ee77SLucas Stach }
11483283ee77SLucas Stach 
1149a8c21a54SThe etnaviv authors /*
1150a8c21a54SThe etnaviv authors  * event management:
1151a8c21a54SThe etnaviv authors  */
1152a8c21a54SThe etnaviv authors 
115395a428c1SChristian Gmeiner static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
115495a428c1SChristian Gmeiner 	unsigned int *events)
1155a8c21a54SThe etnaviv authors {
11565a23144cSLucas Stach 	unsigned long timeout = msecs_to_jiffies(10 * 10000);
1157f098f9b8SLucas Stach 	unsigned i, acquired = 0, rpm_count = 0;
1158f098f9b8SLucas Stach 	int ret;
1159a8c21a54SThe etnaviv authors 
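	/*
	 * Wait until nr_events slots become free.  The wait budget is shared
	 * across all requested events: each successful wait re-arms the next
	 * one with the time that was left over.
	 */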
116095a428c1SChristian Gmeiner 	for (i = 0; i < nr_events; i++) {
116188c31d2dSDan Carpenter 		unsigned long remaining;
116295a428c1SChristian Gmeiner 
116388c31d2dSDan Carpenter 		remaining = wait_for_completion_timeout(&gpu->event_free, timeout);
116495a428c1SChristian Gmeiner 
116588c31d2dSDan Carpenter 		if (!remaining) {
1166a8c21a54SThe etnaviv authors 			dev_err(gpu->dev, "wait_for_completion_timeout failed");
1167f098f9b8SLucas Stach 			ret = -EBUSY;
116895a428c1SChristian Gmeiner 			goto out;
116995a428c1SChristian Gmeiner 		}
117095a428c1SChristian Gmeiner 
117195a428c1SChristian Gmeiner 		acquired++;
117288c31d2dSDan Carpenter 		timeout = remaining;
117395a428c1SChristian Gmeiner 	}
1174a8c21a54SThe etnaviv authors 
11755a23144cSLucas Stach 	spin_lock(&gpu->event_spinlock);
1176a8c21a54SThe etnaviv authors 
117795a428c1SChristian Gmeiner 	for (i = 0; i < nr_events; i++) {
117895a428c1SChristian Gmeiner 		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
117995a428c1SChristian Gmeiner 
118095a428c1SChristian Gmeiner 		events[i] = event;
1181547d340dSChristian Gmeiner 		memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
1182355502e0SChristian Gmeiner 		set_bit(event, gpu->event_bitmap);
1183a8c21a54SThe etnaviv authors 	}
1184a8c21a54SThe etnaviv authors 
11855a23144cSLucas Stach 	spin_unlock(&gpu->event_spinlock);
1186a8c21a54SThe etnaviv authors 
1187f098f9b8SLucas Stach 	for (i = 0; i < nr_events; i++) {
1188f098f9b8SLucas Stach 		ret = pm_runtime_resume_and_get(gpu->dev);
1189f098f9b8SLucas Stach 		if (ret)
1190f098f9b8SLucas Stach 			goto out_rpm;
1191f098f9b8SLucas Stach 		rpm_count++;
1192f098f9b8SLucas Stach 	}
1193f098f9b8SLucas Stach 
119495a428c1SChristian Gmeiner 	return 0;
119595a428c1SChristian Gmeiner 
1196f098f9b8SLucas Stach out_rpm:
1197f098f9b8SLucas Stach 	for (i = 0; i < rpm_count; i++)
1198f098f9b8SLucas Stach 		pm_runtime_put_autosuspend(gpu->dev);
119995a428c1SChristian Gmeiner out:
120095a428c1SChristian Gmeiner 	for (i = 0; i < acquired; i++)
120195a428c1SChristian Gmeiner 		complete(&gpu->event_free);
120295a428c1SChristian Gmeiner 
1203f098f9b8SLucas Stach 	return ret;
1204a8c21a54SThe etnaviv authors }
1205a8c21a54SThe etnaviv authors 
1206a8c21a54SThe etnaviv authors static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1207a8c21a54SThe etnaviv authors {
1208355502e0SChristian Gmeiner 	if (!test_bit(event, gpu->event_bitmap)) {
1209a8c21a54SThe etnaviv authors 		dev_warn(gpu->dev, "event %u is already marked as free",
1210a8c21a54SThe etnaviv authors 			 event);
1211a8c21a54SThe etnaviv authors 	} else {
1212355502e0SChristian Gmeiner 		clear_bit(event, gpu->event_bitmap);
1213a8c21a54SThe etnaviv authors 		complete(&gpu->event_free);
1214a8c21a54SThe etnaviv authors 	}
1215f098f9b8SLucas Stach 
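	/* drop the runtime PM reference taken for this event in event_alloc() */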
1216f098f9b8SLucas Stach 	pm_runtime_put_autosuspend(gpu->dev);
1217a8c21a54SThe etnaviv authors }
1218a8c21a54SThe etnaviv authors 
1219a8c21a54SThe etnaviv authors /*
1220a8c21a54SThe etnaviv authors  * Cmdstream submission/retirement:
1221a8c21a54SThe etnaviv authors  */
1222a8c21a54SThe etnaviv authors int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
122338c4a4cfSArnd Bergmann 	u32 id, struct drm_etnaviv_timespec *timeout)
1224a8c21a54SThe etnaviv authors {
12258bc4d885SLucas Stach 	struct dma_fence *fence;
1226a8c21a54SThe etnaviv authors 	int ret;
1227a8c21a54SThe etnaviv authors 
12288bc4d885SLucas Stach 	/*
1229e93b6deeSLucas Stach 	 * Look up the fence and take a reference. We might still find a fence
12308bc4d885SLucas Stach 	 * whose refcount has already dropped to zero. dma_fence_get_rcu
12318bc4d885SLucas Stach 	 * pretends we didn't find a fence in that case.
12328bc4d885SLucas Stach 	 */
1233e93b6deeSLucas Stach 	rcu_read_lock();
1234764be123SLucas Stach 	fence = xa_load(&gpu->user_fences, id);
12358bc4d885SLucas Stach 	if (fence)
12368bc4d885SLucas Stach 		fence = dma_fence_get_rcu(fence);
1237e93b6deeSLucas Stach 	rcu_read_unlock();
12388bc4d885SLucas Stach 
12398bc4d885SLucas Stach 	if (!fence)
12408bc4d885SLucas Stach 		return 0;
1241a8c21a54SThe etnaviv authors 
1242a8c21a54SThe etnaviv authors 	if (!timeout) {
1243a8c21a54SThe etnaviv authors 		/* No timeout was requested: just test for completion */
12448bc4d885SLucas Stach 		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
1245a8c21a54SThe etnaviv authors 	} else {
1246a8c21a54SThe etnaviv authors 		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
1247a8c21a54SThe etnaviv authors 
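		/*
		 * dma_fence_wait_timeout() returns the remaining jiffies on
		 * success, 0 on timeout and -ERESTARTSYS when interrupted;
		 * fold that into the 0 / -ETIMEDOUT / -ERESTARTSYS convention
		 * used by the ioctl.
		 */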
12488bc4d885SLucas Stach 		ret = dma_fence_wait_timeout(fence, true, remaining);
12498bc4d885SLucas Stach 		if (ret == 0)
1250a8c21a54SThe etnaviv authors 			ret = -ETIMEDOUT;
12518bc4d885SLucas Stach 		else if (ret != -ERESTARTSYS)
1252a8c21a54SThe etnaviv authors 			ret = 0;
12538bc4d885SLucas Stach 
1254a8c21a54SThe etnaviv authors 	}
1255a8c21a54SThe etnaviv authors 
12568bc4d885SLucas Stach 	dma_fence_put(fence);
1257a8c21a54SThe etnaviv authors 	return ret;
1258a8c21a54SThe etnaviv authors }
1259a8c21a54SThe etnaviv authors 
1260a8c21a54SThe etnaviv authors /*
1261a8c21a54SThe etnaviv authors  * Wait for an object to become inactive.  This, on its own, is not race
1262e93b6deeSLucas Stach  * free: the object is moved by the scheduler off the active list, and
1263a8c21a54SThe etnaviv authors  * then the iova is put.  Moreover, the object could be re-submitted just
1264a8c21a54SThe etnaviv authors  * after we notice that it's become inactive.
1265a8c21a54SThe etnaviv authors  *
1266a8c21a54SThe etnaviv authors  * Although the retirement happens under the gpu lock, we don't want to hold
1267a8c21a54SThe etnaviv authors  * that lock in this function while waiting.
1268a8c21a54SThe etnaviv authors  */
1269a8c21a54SThe etnaviv authors int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
127038c4a4cfSArnd Bergmann 	struct etnaviv_gem_object *etnaviv_obj,
127138c4a4cfSArnd Bergmann 	struct drm_etnaviv_timespec *timeout)
1272a8c21a54SThe etnaviv authors {
1273a8c21a54SThe etnaviv authors 	unsigned long remaining;
1274a8c21a54SThe etnaviv authors 	long ret;
1275a8c21a54SThe etnaviv authors 
1276a8c21a54SThe etnaviv authors 	if (!timeout)
1277a8c21a54SThe etnaviv authors 		return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1278a8c21a54SThe etnaviv authors 
1279a8c21a54SThe etnaviv authors 	remaining = etnaviv_timeout_to_jiffies(timeout);
1280a8c21a54SThe etnaviv authors 
1281a8c21a54SThe etnaviv authors 	ret = wait_event_interruptible_timeout(gpu->fence_event,
1282a8c21a54SThe etnaviv authors 					       !is_active(etnaviv_obj),
1283a8c21a54SThe etnaviv authors 					       remaining);
1284fa67ac84SLucas Stach 	if (ret > 0)
1285a8c21a54SThe etnaviv authors 		return 0;
1286fa67ac84SLucas Stach 	else if (ret == -ERESTARTSYS)
1287a8c21a54SThe etnaviv authors 		return -ERESTARTSYS;
1288fa67ac84SLucas Stach 	else
1289a8c21a54SThe etnaviv authors 		return -ETIMEDOUT;
1290a8c21a54SThe etnaviv authors }
1291a8c21a54SThe etnaviv authors 
129268dc0b29SChristian Gmeiner static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
129368dc0b29SChristian Gmeiner 	struct etnaviv_event *event, unsigned int flags)
129468dc0b29SChristian Gmeiner {
1295ef146c00SLucas Stach 	const struct etnaviv_gem_submit *submit = event->submit;
129668dc0b29SChristian Gmeiner 	unsigned int i;
129768dc0b29SChristian Gmeiner 
1298ef146c00SLucas Stach 	for (i = 0; i < submit->nr_pmrs; i++) {
1299ef146c00SLucas Stach 		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
130068dc0b29SChristian Gmeiner 
130168dc0b29SChristian Gmeiner 		if (pmr->flags == flags)
13027a9c0fe2SLucas Stach 			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
130368dc0b29SChristian Gmeiner 	}
130468dc0b29SChristian Gmeiner }
130568dc0b29SChristian Gmeiner 
130668dc0b29SChristian Gmeiner static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
130768dc0b29SChristian Gmeiner 	struct etnaviv_event *event)
130868dc0b29SChristian Gmeiner {
13092c8b0c5aSChristian Gmeiner 	u32 val;
13102c8b0c5aSChristian Gmeiner 
13112c8b0c5aSChristian Gmeiner 	/* disable clock gating */
131261a6920bSDoug Brown 	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
13132c8b0c5aSChristian Gmeiner 	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
131461a6920bSDoug Brown 	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
13152c8b0c5aSChristian Gmeiner 
131604a7d18dSChristian Gmeiner 	/* enable debug register */
131704a7d18dSChristian Gmeiner 	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
131804a7d18dSChristian Gmeiner 	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
131904a7d18dSChristian Gmeiner 	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
132004a7d18dSChristian Gmeiner 
132168dc0b29SChristian Gmeiner 	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
132268dc0b29SChristian Gmeiner }
132368dc0b29SChristian Gmeiner 
132468dc0b29SChristian Gmeiner static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
132568dc0b29SChristian Gmeiner 	struct etnaviv_event *event)
132668dc0b29SChristian Gmeiner {
1327ef146c00SLucas Stach 	const struct etnaviv_gem_submit *submit = event->submit;
132868dc0b29SChristian Gmeiner 	unsigned int i;
13292c8b0c5aSChristian Gmeiner 	u32 val;
133068dc0b29SChristian Gmeiner 
133168dc0b29SChristian Gmeiner 	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
133268dc0b29SChristian Gmeiner 
1333ef146c00SLucas Stach 	for (i = 0; i < submit->nr_pmrs; i++) {
1334ef146c00SLucas Stach 		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
133568dc0b29SChristian Gmeiner 
133668dc0b29SChristian Gmeiner 		*pmr->bo_vma = pmr->sequence;
133768dc0b29SChristian Gmeiner 	}
13382c8b0c5aSChristian Gmeiner 
133904a7d18dSChristian Gmeiner 	/* disable debug register */
134004a7d18dSChristian Gmeiner 	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
134104a7d18dSChristian Gmeiner 	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
134204a7d18dSChristian Gmeiner 	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
134304a7d18dSChristian Gmeiner 
13442c8b0c5aSChristian Gmeiner 	/* enable clock gating */
134561a6920bSDoug Brown 	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
13462c8b0c5aSChristian Gmeiner 	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
134761a6920bSDoug Brown 	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
134868dc0b29SChristian Gmeiner }
134968dc0b29SChristian Gmeiner 
135068dc0b29SChristian Gmeiner 
1351a8c21a54SThe etnaviv authors /* add bo's to gpu's ring, and kick gpu: */
1352e93b6deeSLucas Stach struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
1353a8c21a54SThe etnaviv authors {
1354e93b6deeSLucas Stach 	struct etnaviv_gpu *gpu = submit->gpu;
1355e93b6deeSLucas Stach 	struct dma_fence *gpu_fence;
135668dc0b29SChristian Gmeiner 	unsigned int i, nr_events = 1, event[3];
1357a8c21a54SThe etnaviv authors 	int ret;
1358a8c21a54SThe etnaviv authors 
1359a8c21a54SThe etnaviv authors 	/*
136068dc0b29SChristian Gmeiner 	 * if there are performance monitor requests we need to have
136168dc0b29SChristian Gmeiner 	 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
136268dc0b29SChristian Gmeiner 	 *   requests.
136368dc0b29SChristian Gmeiner 	 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
136468dc0b29SChristian Gmeiner 	 *   and update the sequence number for userspace.
136568dc0b29SChristian Gmeiner 	 */
1366ef146c00SLucas Stach 	if (submit->nr_pmrs)
136768dc0b29SChristian Gmeiner 		nr_events = 3;
136868dc0b29SChristian Gmeiner 
136968dc0b29SChristian Gmeiner 	ret = event_alloc(gpu, nr_events, event);
137095a428c1SChristian Gmeiner 	if (ret) {
137168dc0b29SChristian Gmeiner 		DRM_ERROR("no free events\n");
1372c5d5a32eSNavid Emamdoost 		pm_runtime_put_noidle(gpu->dev);
1373e93b6deeSLucas Stach 		return NULL;
1374a8c21a54SThe etnaviv authors 	}
1375a8c21a54SThe etnaviv authors 
1376f3cd1b06SLucas Stach 	mutex_lock(&gpu->lock);
1377f3cd1b06SLucas Stach 
1378e93b6deeSLucas Stach 	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
1379e93b6deeSLucas Stach 	if (!gpu_fence) {
138068dc0b29SChristian Gmeiner 		for (i = 0; i < nr_events; i++)
138168dc0b29SChristian Gmeiner 			event_free(gpu, event[i]);
138268dc0b29SChristian Gmeiner 
138345abdf35SWei Yongjun 		goto out_unlock;
1384a8c21a54SThe etnaviv authors 	}
1385a8c21a54SThe etnaviv authors 
1386647d817dSLucas Stach 	if (gpu->state == ETNA_GPU_STATE_INITIALIZED)
1387d6408538SLucas Stach 		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
1388d6408538SLucas Stach 
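	/*
	 * Take a reference on the MMU context that is currently active on the
	 * GPU and stash it in the submit, keeping that context alive until the
	 * submit is cleaned up.
	 */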
1389cda75329SLucas Stach 	if (submit->prev_mmu_context)
1390cda75329SLucas Stach 		etnaviv_iommu_context_put(submit->prev_mmu_context);
139178edefc0SLucas Stach 	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
139217e4660aSLucas Stach 
1393ef146c00SLucas Stach 	if (submit->nr_pmrs) {
139468dc0b29SChristian Gmeiner 		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1395ef146c00SLucas Stach 		kref_get(&submit->refcount);
1396ef146c00SLucas Stach 		gpu->event[event[1]].submit = submit;
139768dc0b29SChristian Gmeiner 		etnaviv_sync_point_queue(gpu, event[1]);
139868dc0b29SChristian Gmeiner 	}
139968dc0b29SChristian Gmeiner 
1400e93b6deeSLucas Stach 	gpu->event[event[0]].fence = gpu_fence;
14016d7a20c0SLucas Stach 	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
140217e4660aSLucas Stach 	etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
140317e4660aSLucas Stach 			     event[0], &submit->cmdbuf);
140468dc0b29SChristian Gmeiner 
1405ef146c00SLucas Stach 	if (submit->nr_pmrs) {
140668dc0b29SChristian Gmeiner 		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
1407ef146c00SLucas Stach 		kref_get(&submit->refcount);
1408ef146c00SLucas Stach 		gpu->event[event[2]].submit = submit;
140968dc0b29SChristian Gmeiner 		etnaviv_sync_point_queue(gpu, event[2]);
141068dc0b29SChristian Gmeiner 	}
1411a8c21a54SThe etnaviv authors 
141245abdf35SWei Yongjun out_unlock:
1413a8c21a54SThe etnaviv authors 	mutex_unlock(&gpu->lock);
1414a8c21a54SThe etnaviv authors 
1415e93b6deeSLucas Stach 	return gpu_fence;
1416a8c21a54SThe etnaviv authors }
1417a8c21a54SThe etnaviv authors 
1418357713ceSChristian Gmeiner static void sync_point_worker(struct work_struct *work)
1419357713ceSChristian Gmeiner {
1420357713ceSChristian Gmeiner 	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1421357713ceSChristian Gmeiner 					       sync_point_work);
1422b9a48aa7SLucas Stach 	struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
1423b9a48aa7SLucas Stach 	u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
1424357713ceSChristian Gmeiner 
1425b9a48aa7SLucas Stach 	event->sync_point(gpu, event);
1426ef146c00SLucas Stach 	etnaviv_submit_put(event->submit);
1427357713ceSChristian Gmeiner 	event_free(gpu, gpu->sync_point_event);
1428b9a48aa7SLucas Stach 
1429b9a48aa7SLucas Stach 	/* restart FE last to avoid GPU and IRQ racing against this worker */
1430b9a48aa7SLucas Stach 	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
1431357713ceSChristian Gmeiner }
1432357713ceSChristian Gmeiner 
14335cefcf9fSLucas Stach void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
14345cefcf9fSLucas Stach {
14355cefcf9fSLucas Stach 	struct etnaviv_gpu *gpu = submit->gpu;
14365cefcf9fSLucas Stach 	char *comm = NULL, *cmd = NULL;
14375cefcf9fSLucas Stach 	struct task_struct *task;
14385cefcf9fSLucas Stach 	unsigned int i;
14395cefcf9fSLucas Stach 
14405cefcf9fSLucas Stach 	dev_err(gpu->dev, "recover hung GPU!\n");
14415cefcf9fSLucas Stach 
14425cefcf9fSLucas Stach 	task = get_pid_task(submit->pid, PIDTYPE_PID);
14435cefcf9fSLucas Stach 	if (task) {
14445cefcf9fSLucas Stach 		comm = kstrdup(task->comm, GFP_KERNEL);
14455cefcf9fSLucas Stach 		cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
14465cefcf9fSLucas Stach 		put_task_struct(task);
14475cefcf9fSLucas Stach 	}
14485cefcf9fSLucas Stach 
14495cefcf9fSLucas Stach 	if (comm && cmd)
14505cefcf9fSLucas Stach 		dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
14515cefcf9fSLucas Stach 
14525cefcf9fSLucas Stach 	kfree(cmd);
14535cefcf9fSLucas Stach 	kfree(comm);
14545cefcf9fSLucas Stach 
14555cefcf9fSLucas Stach 	if (pm_runtime_get_sync(gpu->dev) < 0)
14565cefcf9fSLucas Stach 		goto pm_put;
14575cefcf9fSLucas Stach 
14585cefcf9fSLucas Stach 	mutex_lock(&gpu->lock);
14595cefcf9fSLucas Stach 
14605cefcf9fSLucas Stach 	etnaviv_hw_reset(gpu);
14615cefcf9fSLucas Stach 
14625cefcf9fSLucas Stach 	/* complete all events, the GPU won't do it after the reset */
14635cefcf9fSLucas Stach 	spin_lock(&gpu->event_spinlock);
14645cefcf9fSLucas Stach 	for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
146580f6b63eSLucas Stach 		event_free(gpu, i);
14665cefcf9fSLucas Stach 	spin_unlock(&gpu->event_spinlock);
14675cefcf9fSLucas Stach 
14685cefcf9fSLucas Stach 	etnaviv_gpu_hw_init(gpu);
14695cefcf9fSLucas Stach 
14705cefcf9fSLucas Stach 	mutex_unlock(&gpu->lock);
14715cefcf9fSLucas Stach 	pm_runtime_mark_last_busy(gpu->dev);
14725cefcf9fSLucas Stach pm_put:
14735cefcf9fSLucas Stach 	pm_runtime_put_autosuspend(gpu->dev);
14745cefcf9fSLucas Stach }
14755cefcf9fSLucas Stach 
14764df3000eSLucas Stach static void dump_mmu_fault(struct etnaviv_gpu *gpu)
14774df3000eSLucas Stach {
1478b4bc0e74SChristian Gmeiner 	static const char *fault_reasons[] = {
1479b4bc0e74SChristian Gmeiner 		"slave not present",
1480b4bc0e74SChristian Gmeiner 		"page not present",
1481b4bc0e74SChristian Gmeiner 		"write violation",
1482b4bc0e74SChristian Gmeiner 		"out of bounds",
1483b4bc0e74SChristian Gmeiner 		"read security violation",
1484b4bc0e74SChristian Gmeiner 		"write security violation",
1485b4bc0e74SChristian Gmeiner 	};
1486b4bc0e74SChristian Gmeiner 
1487c997c3dfSLucas Stach 	u32 status_reg, status;
14884df3000eSLucas Stach 	int i;
14894df3000eSLucas Stach 
1490c997c3dfSLucas Stach 	if (gpu->sec_mode == ETNA_SEC_NONE)
1491c997c3dfSLucas Stach 		status_reg = VIVS_MMUv2_STATUS;
1492c997c3dfSLucas Stach 	else
1493c997c3dfSLucas Stach 		status_reg = VIVS_MMUv2_SEC_STATUS;
1494c997c3dfSLucas Stach 
1495c997c3dfSLucas Stach 	status = gpu_read(gpu, status_reg);
14964df3000eSLucas Stach 	dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
14974df3000eSLucas Stach 
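	/*
	 * The status register packs one 4-bit exception code per MMU;
	 * decode and report every non-zero entry.
	 */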
14984df3000eSLucas Stach 	for (i = 0; i < 4; i++) {
1499b4bc0e74SChristian Gmeiner 		const char *reason = "unknown";
1500c997c3dfSLucas Stach 		u32 address_reg;
1501b4bc0e74SChristian Gmeiner 		u32 mmu_status;
1502c997c3dfSLucas Stach 
1503b4bc0e74SChristian Gmeiner 		mmu_status = (status >> (i * 4)) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK;
1504b4bc0e74SChristian Gmeiner 		if (!mmu_status)
15054df3000eSLucas Stach 			continue;
15064df3000eSLucas Stach 
1507b4bc0e74SChristian Gmeiner 		if ((mmu_status - 1) < ARRAY_SIZE(fault_reasons))
1508b4bc0e74SChristian Gmeiner 			reason = fault_reasons[mmu_status - 1];
1509b4bc0e74SChristian Gmeiner 
1510c997c3dfSLucas Stach 		if (gpu->sec_mode == ETNA_SEC_NONE)
1511c997c3dfSLucas Stach 			address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
1512c997c3dfSLucas Stach 		else
1513c997c3dfSLucas Stach 			address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
1514c997c3dfSLucas Stach 
1515b4bc0e74SChristian Gmeiner 		dev_err_ratelimited(gpu->dev,
1516b4bc0e74SChristian Gmeiner 				    "MMU %d fault (%s) addr 0x%08x\n",
1517b4bc0e74SChristian Gmeiner 				    i, reason, gpu_read(gpu, address_reg));
15184df3000eSLucas Stach 	}
15194df3000eSLucas Stach }
15204df3000eSLucas Stach 
1521a8c21a54SThe etnaviv authors static irqreturn_t irq_handler(int irq, void *data)
1522a8c21a54SThe etnaviv authors {
1523a8c21a54SThe etnaviv authors 	struct etnaviv_gpu *gpu = data;
1524a8c21a54SThe etnaviv authors 	irqreturn_t ret = IRQ_NONE;
1525a8c21a54SThe etnaviv authors 
1526a8c21a54SThe etnaviv authors 	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1527a8c21a54SThe etnaviv authors 
1528a8c21a54SThe etnaviv authors 	if (intr != 0) {
1529a8c21a54SThe etnaviv authors 		int event;
1530a8c21a54SThe etnaviv authors 
1531a8c21a54SThe etnaviv authors 		pm_runtime_mark_last_busy(gpu->dev);
1532a8c21a54SThe etnaviv authors 
1533a8c21a54SThe etnaviv authors 		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1534a8c21a54SThe etnaviv authors 
1535a8c21a54SThe etnaviv authors 		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1536a8c21a54SThe etnaviv authors 			dev_err(gpu->dev, "AXI bus error\n");
1537a8c21a54SThe etnaviv authors 			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1538a8c21a54SThe etnaviv authors 		}
1539a8c21a54SThe etnaviv authors 
1540128a9b1dSLucas Stach 		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
15414df3000eSLucas Stach 			dump_mmu_fault(gpu);
15429ec2afdeSLucas Stach 			gpu->state = ETNA_GPU_STATE_FAULT;
15439ec2afdeSLucas Stach 			drm_sched_fault(&gpu->sched);
1544128a9b1dSLucas Stach 			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
1545128a9b1dSLucas Stach 		}
1546128a9b1dSLucas Stach 
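		/*
		 * The remaining bits signal completed events.  ffs() returns
		 * a 1-based bit index, hence the decrement below.
		 */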
1547a8c21a54SThe etnaviv authors 		while ((event = ffs(intr)) != 0) {
1548f54d1867SChris Wilson 			struct dma_fence *fence;
1549a8c21a54SThe etnaviv authors 
1550a8c21a54SThe etnaviv authors 			event -= 1;
1551a8c21a54SThe etnaviv authors 
1552a8c21a54SThe etnaviv authors 			intr &= ~(1 << event);
1553a8c21a54SThe etnaviv authors 
1554a8c21a54SThe etnaviv authors 			dev_dbg(gpu->dev, "event %u\n", event);
1555a8c21a54SThe etnaviv authors 
1556357713ceSChristian Gmeiner 			if (gpu->event[event].sync_point) {
1557357713ceSChristian Gmeiner 				gpu->sync_point_event = event;
1558a7790d78SLucas Stach 				queue_work(gpu->wq, &gpu->sync_point_work);
1559357713ceSChristian Gmeiner 			}
1560357713ceSChristian Gmeiner 
1561a8c21a54SThe etnaviv authors 			fence = gpu->event[event].fence;
156268dc0b29SChristian Gmeiner 			if (!fence)
156368dc0b29SChristian Gmeiner 				continue;
156468dc0b29SChristian Gmeiner 
1565a8c21a54SThe etnaviv authors 			gpu->event[event].fence = NULL;
1566a8c21a54SThe etnaviv authors 
1567a8c21a54SThe etnaviv authors 			/*
1568a8c21a54SThe etnaviv authors 			 * Events can be processed out of order.  E.g.,
1569a8c21a54SThe etnaviv authors 			 * - allocate and queue event 0
1570a8c21a54SThe etnaviv authors 			 * - allocate event 1
1571a8c21a54SThe etnaviv authors 			 * - event 0 completes, we process it
1572a8c21a54SThe etnaviv authors 			 * - allocate and queue event 0
1573a8c21a54SThe etnaviv authors 			 * - event 1 and event 0 complete
1574a8c21a54SThe etnaviv authors 			 * we can end up processing event 0 first, then 1.
1575a8c21a54SThe etnaviv authors 			 */
1576a8c21a54SThe etnaviv authors 			if (fence_after(fence->seqno, gpu->completed_fence))
1577a8c21a54SThe etnaviv authors 				gpu->completed_fence = fence->seqno;
15788bc4d885SLucas Stach 			dma_fence_signal(fence);
1579a8c21a54SThe etnaviv authors 
1580a8c21a54SThe etnaviv authors 			event_free(gpu, event);
1581a8c21a54SThe etnaviv authors 		}
1582a8c21a54SThe etnaviv authors 
1583a8c21a54SThe etnaviv authors 		ret = IRQ_HANDLED;
1584a8c21a54SThe etnaviv authors 	}
1585a8c21a54SThe etnaviv authors 
1586a8c21a54SThe etnaviv authors 	return ret;
1587a8c21a54SThe etnaviv authors }
1588a8c21a54SThe etnaviv authors 
1589a8c21a54SThe etnaviv authors static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1590a8c21a54SThe etnaviv authors {
1591a8c21a54SThe etnaviv authors 	int ret;
1592a8c21a54SThe etnaviv authors 
159365f037e8SLucas Stach 	ret = clk_prepare_enable(gpu->clk_reg);
159465f037e8SLucas Stach 	if (ret)
159565f037e8SLucas Stach 		return ret;
159665f037e8SLucas Stach 
15979c7310c0SLucas Stach 	ret = clk_prepare_enable(gpu->clk_bus);
1598a8c21a54SThe etnaviv authors 	if (ret)
1599f8794feaSLubomir Rintel 		goto disable_clk_reg;
1600a8c21a54SThe etnaviv authors 
16019c7310c0SLucas Stach 	ret = clk_prepare_enable(gpu->clk_core);
16029c7310c0SLucas Stach 	if (ret)
16039c7310c0SLucas Stach 		goto disable_clk_bus;
16049c7310c0SLucas Stach 
16059c7310c0SLucas Stach 	ret = clk_prepare_enable(gpu->clk_shader);
16069c7310c0SLucas Stach 	if (ret)
16079c7310c0SLucas Stach 		goto disable_clk_core;
16089c7310c0SLucas Stach 
1609a8c21a54SThe etnaviv authors 	return 0;
16109c7310c0SLucas Stach 
16119c7310c0SLucas Stach disable_clk_core:
16129c7310c0SLucas Stach 	clk_disable_unprepare(gpu->clk_core);
16139c7310c0SLucas Stach disable_clk_bus:
16149c7310c0SLucas Stach 	clk_disable_unprepare(gpu->clk_bus);
1615f8794feaSLubomir Rintel disable_clk_reg:
1616f8794feaSLubomir Rintel 	clk_disable_unprepare(gpu->clk_reg);
16179c7310c0SLucas Stach 
16189c7310c0SLucas Stach 	return ret;
1619a8c21a54SThe etnaviv authors }
1620a8c21a54SThe etnaviv authors 
1621a8c21a54SThe etnaviv authors static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1622a8c21a54SThe etnaviv authors {
16239c7310c0SLucas Stach 	clk_disable_unprepare(gpu->clk_shader);
16249c7310c0SLucas Stach 	clk_disable_unprepare(gpu->clk_core);
16259c7310c0SLucas Stach 	clk_disable_unprepare(gpu->clk_bus);
162665f037e8SLucas Stach 	clk_disable_unprepare(gpu->clk_reg);
1627a8c21a54SThe etnaviv authors 
1628a8c21a54SThe etnaviv authors 	return 0;
1629a8c21a54SThe etnaviv authors }
1630a8c21a54SThe etnaviv authors 
1631b88163e3SLucas Stach int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1632b88163e3SLucas Stach {
1633b88163e3SLucas Stach 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
1634b88163e3SLucas Stach 
1635b88163e3SLucas Stach 	do {
1636b88163e3SLucas Stach 		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1637b88163e3SLucas Stach 
1638b88163e3SLucas Stach 		if ((idle & gpu->idle_mask) == gpu->idle_mask)
1639b88163e3SLucas Stach 			return 0;
1640b88163e3SLucas Stach 
1641b88163e3SLucas Stach 		if (time_is_before_jiffies(timeout)) {
1642b88163e3SLucas Stach 			dev_warn(gpu->dev,
1643b88163e3SLucas Stach 				 "timed out waiting for idle: idle=0x%x\n",
1644b88163e3SLucas Stach 				 idle);
1645b88163e3SLucas Stach 			return -ETIMEDOUT;
1646b88163e3SLucas Stach 		}
1647b88163e3SLucas Stach 
1648b88163e3SLucas Stach 		udelay(5);
1649b88163e3SLucas Stach 	} while (1);
1650b88163e3SLucas Stach }
1651b88163e3SLucas Stach 
16527cb54494SLucas Stach static void etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1653a8c21a54SThe etnaviv authors {
1654647d817dSLucas Stach 	if (gpu->state == ETNA_GPU_STATE_RUNNING) {
1655a8c21a54SThe etnaviv authors 		/* Replace the last WAIT with END */
165640c27bdeSLucas Stach 		mutex_lock(&gpu->lock);
1657a8c21a54SThe etnaviv authors 		etnaviv_buffer_end(gpu);
165840c27bdeSLucas Stach 		mutex_unlock(&gpu->lock);
1659a8c21a54SThe etnaviv authors 
1660a8c21a54SThe etnaviv authors 		/*
1661a8c21a54SThe etnaviv authors 		 * We know that only the FE is busy here; this should
1662a8c21a54SThe etnaviv authors 		 * happen quickly (as the WAIT is only 200 cycles).  If
1663a8c21a54SThe etnaviv authors 		 * we fail, just warn and continue.
1664a8c21a54SThe etnaviv authors 		 */
1665b88163e3SLucas Stach 		etnaviv_gpu_wait_idle(gpu, 100);
166617e4660aSLucas Stach 
1667647d817dSLucas Stach 		gpu->state = ETNA_GPU_STATE_INITIALIZED;
1668a8c21a54SThe etnaviv authors 	}
1669a8c21a54SThe etnaviv authors 
167017e4660aSLucas Stach 	gpu->exec_state = -1;
1671a8c21a54SThe etnaviv authors }
1672a8c21a54SThe etnaviv authors 
1673a8c21a54SThe etnaviv authors static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1674a8c21a54SThe etnaviv authors {
1675a8c21a54SThe etnaviv authors 	int ret;
1676a8c21a54SThe etnaviv authors 
1677a8c21a54SThe etnaviv authors 	ret = mutex_lock_killable(&gpu->lock);
1678a8c21a54SThe etnaviv authors 	if (ret)
1679a8c21a54SThe etnaviv authors 		return ret;
1680a8c21a54SThe etnaviv authors 
1681bcdfb5e5SRussell King 	etnaviv_gpu_update_clock(gpu);
1682a8c21a54SThe etnaviv authors 	etnaviv_gpu_hw_init(gpu);
1683a8c21a54SThe etnaviv authors 
1684a8c21a54SThe etnaviv authors 	mutex_unlock(&gpu->lock);
1685a8c21a54SThe etnaviv authors 
1686a8c21a54SThe etnaviv authors 	return 0;
1687a8c21a54SThe etnaviv authors }
1688a8c21a54SThe etnaviv authors 
1689bcdfb5e5SRussell King static int
1690bcdfb5e5SRussell King etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
1691bcdfb5e5SRussell King 				  unsigned long *state)
1692bcdfb5e5SRussell King {
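	/* states 0..6 map directly onto gpu->freq_scale; a higher state means stronger throttling */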
1693bcdfb5e5SRussell King 	*state = 6;
1694bcdfb5e5SRussell King 
1695bcdfb5e5SRussell King 	return 0;
1696bcdfb5e5SRussell King }
1697bcdfb5e5SRussell King 
1698bcdfb5e5SRussell King static int
1699bcdfb5e5SRussell King etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
1700bcdfb5e5SRussell King 				  unsigned long *state)
1701bcdfb5e5SRussell King {
1702bcdfb5e5SRussell King 	struct etnaviv_gpu *gpu = cdev->devdata;
1703bcdfb5e5SRussell King 
1704bcdfb5e5SRussell King 	*state = gpu->freq_scale;
1705bcdfb5e5SRussell King 
1706bcdfb5e5SRussell King 	return 0;
1707bcdfb5e5SRussell King }
1708bcdfb5e5SRussell King 
1709bcdfb5e5SRussell King static int
1710bcdfb5e5SRussell King etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
1711bcdfb5e5SRussell King 				  unsigned long state)
1712bcdfb5e5SRussell King {
1713bcdfb5e5SRussell King 	struct etnaviv_gpu *gpu = cdev->devdata;
1714bcdfb5e5SRussell King 
1715bcdfb5e5SRussell King 	mutex_lock(&gpu->lock);
1716bcdfb5e5SRussell King 	gpu->freq_scale = state;
1717bcdfb5e5SRussell King 	if (!pm_runtime_suspended(gpu->dev))
1718bcdfb5e5SRussell King 		etnaviv_gpu_update_clock(gpu);
1719bcdfb5e5SRussell King 	mutex_unlock(&gpu->lock);
1720bcdfb5e5SRussell King 
1721bcdfb5e5SRussell King 	return 0;
1722bcdfb5e5SRussell King }
1723bcdfb5e5SRussell King 
172496894b79SRikard Falkeborn static const struct thermal_cooling_device_ops cooling_ops = {
1725bcdfb5e5SRussell King 	.get_max_state = etnaviv_gpu_cooling_get_max_state,
1726bcdfb5e5SRussell King 	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
1727bcdfb5e5SRussell King 	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
1728bcdfb5e5SRussell King };
1729bcdfb5e5SRussell King 
1730a8c21a54SThe etnaviv authors static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1731a8c21a54SThe etnaviv authors 	void *data)
1732a8c21a54SThe etnaviv authors {
1733a8c21a54SThe etnaviv authors 	struct drm_device *drm = data;
1734a8c21a54SThe etnaviv authors 	struct etnaviv_drm_private *priv = drm->dev_private;
1735a8c21a54SThe etnaviv authors 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1736a8c21a54SThe etnaviv authors 	int ret;
1737a8c21a54SThe etnaviv authors 
173849b82c38SPhilipp Zabel 	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
1739bcdfb5e5SRussell King 		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
1740bcdfb5e5SRussell King 				(char *)dev_name(dev), gpu, &cooling_ops);
1741bcdfb5e5SRussell King 		if (IS_ERR(gpu->cooling))
1742bcdfb5e5SRussell King 			return PTR_ERR(gpu->cooling);
17435247e2aaSLucas Stach 	}
1744bcdfb5e5SRussell King 
1745a7790d78SLucas Stach 	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
1746a7790d78SLucas Stach 	if (!gpu->wq) {
1747e93b6deeSLucas Stach 		ret = -ENOMEM;
1748e93b6deeSLucas Stach 		goto out_thermal;
1749a7790d78SLucas Stach 	}
1750a7790d78SLucas Stach 
1751e93b6deeSLucas Stach 	ret = etnaviv_sched_init(gpu);
1752e93b6deeSLucas Stach 	if (ret)
1753e93b6deeSLucas Stach 		goto out_workqueue;
1754e93b6deeSLucas Stach 
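	/*
	 * Without runtime PM the clocks are never switched on by the RPM
	 * resume callback, so enable them here and leave them running.
	 */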
1755448406eaSLucas Stach 	if (!IS_ENABLED(CONFIG_PM)) {
1756a8c21a54SThe etnaviv authors 		ret = etnaviv_gpu_clk_enable(gpu);
1757e93b6deeSLucas Stach 		if (ret < 0)
1758e93b6deeSLucas Stach 			goto out_sched;
1759448406eaSLucas Stach 	}
1760a8c21a54SThe etnaviv authors 
1761a8c21a54SThe etnaviv authors 	gpu->drm = drm;
1762f54d1867SChris Wilson 	gpu->fence_context = dma_fence_context_alloc(1);
1763764be123SLucas Stach 	xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
1764a8c21a54SThe etnaviv authors 	spin_lock_init(&gpu->fence_spinlock);
1765a8c21a54SThe etnaviv authors 
1766357713ceSChristian Gmeiner 	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
1767a8c21a54SThe etnaviv authors 	init_waitqueue_head(&gpu->fence_event);
1768a8c21a54SThe etnaviv authors 
1769a8c21a54SThe etnaviv authors 	priv->gpu[priv->num_gpus++] = gpu;
1770a8c21a54SThe etnaviv authors 
1771a8c21a54SThe etnaviv authors 	return 0;
1772e93b6deeSLucas Stach 
1773e93b6deeSLucas Stach out_sched:
1774e93b6deeSLucas Stach 	etnaviv_sched_fini(gpu);
1775e93b6deeSLucas Stach 
1776e93b6deeSLucas Stach out_workqueue:
1777e93b6deeSLucas Stach 	destroy_workqueue(gpu->wq);
1778e93b6deeSLucas Stach 
1779e93b6deeSLucas Stach out_thermal:
1780e93b6deeSLucas Stach 	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
1781e93b6deeSLucas Stach 		thermal_cooling_device_unregister(gpu->cooling);
1782e93b6deeSLucas Stach 
1783e93b6deeSLucas Stach 	return ret;
1784a8c21a54SThe etnaviv authors }
1785a8c21a54SThe etnaviv authors 
1786a8c21a54SThe etnaviv authors static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1787a8c21a54SThe etnaviv authors 	void *data)
1788a8c21a54SThe etnaviv authors {
1789a8c21a54SThe etnaviv authors 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1790a8c21a54SThe etnaviv authors 
1791a8c21a54SThe etnaviv authors 	DBG("%s", dev_name(gpu->dev));
1792a8c21a54SThe etnaviv authors 
1793a7790d78SLucas Stach 	destroy_workqueue(gpu->wq);
1794a7790d78SLucas Stach 
1795e93b6deeSLucas Stach 	etnaviv_sched_fini(gpu);
1796e93b6deeSLucas Stach 
17976b05266aSPaul Cercueil 	if (IS_ENABLED(CONFIG_PM)) {
1798a8c21a54SThe etnaviv authors 		pm_runtime_get_sync(gpu->dev);
1799a8c21a54SThe etnaviv authors 		pm_runtime_put_sync_suspend(gpu->dev);
18006b05266aSPaul Cercueil 	} else {
1801a8c21a54SThe etnaviv authors 		etnaviv_gpu_hw_suspend(gpu);
18027cb54494SLucas Stach 		etnaviv_gpu_clk_disable(gpu);
18036b05266aSPaul Cercueil 	}
1804a8c21a54SThe etnaviv authors 
18058f3eea9dSLucas Stach 	if (gpu->mmu_context)
18068f3eea9dSLucas Stach 		etnaviv_iommu_context_put(gpu->mmu_context);
18078f3eea9dSLucas Stach 
18082f9225dbSLucas Stach 	etnaviv_cmdbuf_free(&gpu->buffer);
180927b67278SLucas Stach 	etnaviv_iommu_global_fini(gpu);
1810a8c21a54SThe etnaviv authors 
1811a8c21a54SThe etnaviv authors 	gpu->drm = NULL;
1812764be123SLucas Stach 	xa_destroy(&gpu->user_fences);
1813bcdfb5e5SRussell King 
181449b82c38SPhilipp Zabel 	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
1815bcdfb5e5SRussell King 		thermal_cooling_device_unregister(gpu->cooling);
1816bcdfb5e5SRussell King 	gpu->cooling = NULL;
1817a8c21a54SThe etnaviv authors }
1818a8c21a54SThe etnaviv authors 
1819a8c21a54SThe etnaviv authors static const struct component_ops gpu_ops = {
1820a8c21a54SThe etnaviv authors 	.bind = etnaviv_gpu_bind,
1821a8c21a54SThe etnaviv authors 	.unbind = etnaviv_gpu_unbind,
1822a8c21a54SThe etnaviv authors };
1823a8c21a54SThe etnaviv authors 
1824a8c21a54SThe etnaviv authors static const struct of_device_id etnaviv_gpu_match[] = {
1825a8c21a54SThe etnaviv authors 	{
1826a8c21a54SThe etnaviv authors 		.compatible = "vivante,gc"
1827a8c21a54SThe etnaviv authors 	},
1828a8c21a54SThe etnaviv authors 	{ /* sentinel */ }
1829a8c21a54SThe etnaviv authors };
1830246774d1SLucas Stach MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
1831a8c21a54SThe etnaviv authors 
1832a8c21a54SThe etnaviv authors static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1833a8c21a54SThe etnaviv authors {
1834a8c21a54SThe etnaviv authors 	struct device *dev = &pdev->dev;
1835a8c21a54SThe etnaviv authors 	struct etnaviv_gpu *gpu;
1836dc227890SFabio Estevam 	int err;
1837a8c21a54SThe etnaviv authors 
1838a8c21a54SThe etnaviv authors 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1839a8c21a54SThe etnaviv authors 	if (!gpu)
1840a8c21a54SThe etnaviv authors 		return -ENOMEM;
1841a8c21a54SThe etnaviv authors 
1842a8c21a54SThe etnaviv authors 	gpu->dev = &pdev->dev;
1843a8c21a54SThe etnaviv authors 	mutex_init(&gpu->lock);
18442cd5bd98SLucas Stach 	mutex_init(&gpu->sched_lock);
1845a8c21a54SThe etnaviv authors 
1846a8c21a54SThe etnaviv authors 	/* Map registers: */
1847facb180dSFabio Estevam 	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
1848a8c21a54SThe etnaviv authors 	if (IS_ERR(gpu->mmio))
1849a8c21a54SThe etnaviv authors 		return PTR_ERR(gpu->mmio);
1850a8c21a54SThe etnaviv authors 
1851a8c21a54SThe etnaviv authors 	/* Get Interrupt: */
1852a8c21a54SThe etnaviv authors 	gpu->irq = platform_get_irq(pdev, 0);
18530e63302dSTian Tao 	if (gpu->irq < 0)
1854db60eda3SFabio Estevam 		return gpu->irq;
1855a8c21a54SThe etnaviv authors 
1856a8c21a54SThe etnaviv authors 	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1857a8c21a54SThe etnaviv authors 			       dev_name(gpu->dev), gpu);
1858a8c21a54SThe etnaviv authors 	if (err) {
1859a8c21a54SThe etnaviv authors 		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1860db60eda3SFabio Estevam 		return err;
1861a8c21a54SThe etnaviv authors 	}
1862a8c21a54SThe etnaviv authors 
1863a8c21a54SThe etnaviv authors 	/* Get Clocks: */
1864f76fc5ffSLubomir Rintel 	gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
186565f037e8SLucas Stach 	DBG("clk_reg: %p", gpu->clk_reg);
186665f037e8SLucas Stach 	if (IS_ERR(gpu->clk_reg))
1867f76fc5ffSLubomir Rintel 		return PTR_ERR(gpu->clk_reg);
186865f037e8SLucas Stach 
1869f76fc5ffSLubomir Rintel 	gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
1870a8c21a54SThe etnaviv authors 	DBG("clk_bus: %p", gpu->clk_bus);
1871a8c21a54SThe etnaviv authors 	if (IS_ERR(gpu->clk_bus))
1872f76fc5ffSLubomir Rintel 		return PTR_ERR(gpu->clk_bus);
1873a8c21a54SThe etnaviv authors 
1874a59052d2SLubomir Rintel 	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1875a8c21a54SThe etnaviv authors 	DBG("clk_core: %p", gpu->clk_core);
1876a8c21a54SThe etnaviv authors 	if (IS_ERR(gpu->clk_core))
1877f76fc5ffSLubomir Rintel 		return PTR_ERR(gpu->clk_core);
1878d79fd1ccSLucas Stach 	gpu->base_rate_core = clk_get_rate(gpu->clk_core);
1879a8c21a54SThe etnaviv authors 
1880f76fc5ffSLubomir Rintel 	gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
1881a8c21a54SThe etnaviv authors 	DBG("clk_shader: %p", gpu->clk_shader);
1882a8c21a54SThe etnaviv authors 	if (IS_ERR(gpu->clk_shader))
1883f76fc5ffSLubomir Rintel 		return PTR_ERR(gpu->clk_shader);
1884d79fd1ccSLucas Stach 	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
1885a8c21a54SThe etnaviv authors 
1886a8c21a54SThe etnaviv authors 	/* TODO: figure out max mapped size */
1887a8c21a54SThe etnaviv authors 	dev_set_drvdata(dev, gpu);
1888a8c21a54SThe etnaviv authors 
1889a8c21a54SThe etnaviv authors 	/*
1890a8c21a54SThe etnaviv authors 	 * We treat the device as initially suspended.  The runtime PM
1891a8c21a54SThe etnaviv authors 	 * autosuspend delay is rather arbitrary: no measurements have
1892a8c21a54SThe etnaviv authors 	 * yet been performed to determine an appropriate value.
1893a8c21a54SThe etnaviv authors 	 */
1894a8c21a54SThe etnaviv authors 	pm_runtime_use_autosuspend(gpu->dev);
1895a8c21a54SThe etnaviv authors 	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1896a8c21a54SThe etnaviv authors 	pm_runtime_enable(gpu->dev);
1897a8c21a54SThe etnaviv authors 
1898a8c21a54SThe etnaviv authors 	err = component_add(&pdev->dev, &gpu_ops);
1899a8c21a54SThe etnaviv authors 	if (err < 0) {
1900a8c21a54SThe etnaviv authors 		dev_err(&pdev->dev, "failed to register component: %d\n", err);
1901db60eda3SFabio Estevam 		return err;
1902a8c21a54SThe etnaviv authors 	}
1903a8c21a54SThe etnaviv authors 
1904a8c21a54SThe etnaviv authors 	return 0;
1905a8c21a54SThe etnaviv authors }
1906a8c21a54SThe etnaviv authors 
1907a8c21a54SThe etnaviv authors static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
1908a8c21a54SThe etnaviv authors {
1909a8c21a54SThe etnaviv authors 	component_del(&pdev->dev, &gpu_ops);
1910a8c21a54SThe etnaviv authors 	pm_runtime_disable(&pdev->dev);
1911a8c21a54SThe etnaviv authors 	return 0;
1912a8c21a54SThe etnaviv authors }
1913a8c21a54SThe etnaviv authors 
1914a8c21a54SThe etnaviv authors static int etnaviv_gpu_rpm_suspend(struct device *dev)
1915a8c21a54SThe etnaviv authors {
1916a8c21a54SThe etnaviv authors 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1917a8c21a54SThe etnaviv authors 	u32 idle, mask;
1918a8c21a54SThe etnaviv authors 
1919f4163814SLucas Stach 	/* If there are any jobs in the HW queue, we're not idle */
1920f4163814SLucas Stach 	if (atomic_read(&gpu->sched.hw_rq_count))
1921a8c21a54SThe etnaviv authors 		return -EBUSY;
1922a8c21a54SThe etnaviv authors 
19231a910c11SGuido Günther 	/* Check whether the hardware (except FE and MC) is idle */
19241a910c11SGuido Günther 	mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
19251a910c11SGuido Günther 				  VIVS_HI_IDLE_STATE_MC);
1926a8c21a54SThe etnaviv authors 	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
192778f2bfa3SGuido Günther 	if (idle != mask) {
192878f2bfa3SGuido Günther 		dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
192978f2bfa3SGuido Günther 				     idle);
1930a8c21a54SThe etnaviv authors 		return -EBUSY;
193178f2bfa3SGuido Günther 	}
1932a8c21a54SThe etnaviv authors 
19337cb54494SLucas Stach 	etnaviv_gpu_hw_suspend(gpu);
19347cb54494SLucas Stach 
1935647d817dSLucas Stach 	gpu->state = ETNA_GPU_STATE_IDENTIFIED;
1936647d817dSLucas Stach 
19377cb54494SLucas Stach 	return etnaviv_gpu_clk_disable(gpu);
1938a8c21a54SThe etnaviv authors }
1939a8c21a54SThe etnaviv authors 
1940a8c21a54SThe etnaviv authors static int etnaviv_gpu_rpm_resume(struct device *dev)
1941a8c21a54SThe etnaviv authors {
1942a8c21a54SThe etnaviv authors 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1943a8c21a54SThe etnaviv authors 	int ret;
1944a8c21a54SThe etnaviv authors 
1945a8c21a54SThe etnaviv authors 	ret = etnaviv_gpu_clk_enable(gpu);
1946a8c21a54SThe etnaviv authors 	if (ret)
1947a8c21a54SThe etnaviv authors 		return ret;
1948a8c21a54SThe etnaviv authors 
1949a8c21a54SThe etnaviv authors 	/* Re-initialise the basic hardware state */
1950647d817dSLucas Stach 	if (gpu->state == ETNA_GPU_STATE_IDENTIFIED) {
1951a8c21a54SThe etnaviv authors 		ret = etnaviv_gpu_hw_resume(gpu);
1952a8c21a54SThe etnaviv authors 		if (ret) {
1953a8c21a54SThe etnaviv authors 			etnaviv_gpu_clk_disable(gpu);
1954a8c21a54SThe etnaviv authors 			return ret;
1955a8c21a54SThe etnaviv authors 		}
1956a8c21a54SThe etnaviv authors 	}
1957a8c21a54SThe etnaviv authors 
1958a8c21a54SThe etnaviv authors 	return 0;
1959a8c21a54SThe etnaviv authors }
1960a8c21a54SThe etnaviv authors 
1961a8c21a54SThe etnaviv authors static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
19626b05266aSPaul Cercueil 	RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, NULL)
1963a8c21a54SThe etnaviv authors };
1964a8c21a54SThe etnaviv authors 
1965a8c21a54SThe etnaviv authors struct platform_driver etnaviv_gpu_driver = {
1966a8c21a54SThe etnaviv authors 	.driver = {
1967a8c21a54SThe etnaviv authors 		.name = "etnaviv-gpu",
1968a8c21a54SThe etnaviv authors 		.owner = THIS_MODULE,
19696b05266aSPaul Cercueil 		.pm = pm_ptr(&etnaviv_gpu_pm_ops),
1970a8c21a54SThe etnaviv authors 		.of_match_table = etnaviv_gpu_match,
1971a8c21a54SThe etnaviv authors 	},
1972a8c21a54SThe etnaviv authors 	.probe = etnaviv_gpu_platform_probe,
1973a8c21a54SThe etnaviv authors 	.remove = etnaviv_gpu_platform_remove,
1974a8c21a54SThe etnaviv authors 	.id_table = gpu_ids,
1975a8c21a54SThe etnaviv authors };
1976