/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014, 2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

extern bool snapshot_debugbus;
extern bool allow_vram_carveout;

enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

/* quirks are OR'd into adreno_info::quirks, so they must be distinct bits: */
enum adreno_quirks {
	ADRENO_QUIRK_TWO_PASS_USE_WFI = (1 << 0),
	ADRENO_QUIRK_FAULT_DETECT_MASK = (1 << 1),
	ADRENO_QUIRK_LMLOADKILL_DISABLE = (1 << 2),
};
37 
38 struct adreno_rev {
39 	uint8_t  core;
40 	uint8_t  major;
41 	uint8_t  minor;
42 	uint8_t  patchid;
43 };
44 
45 #define ADRENO_REV(core, major, minor, patchid) \
46 	((struct adreno_rev){ core, major, minor, patchid })
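
/*
 * For example (values illustrative), an a630 with patch level 0 would be
 * described as:
 *
 *	ADRENO_REV(6, 3, 0, 0)
 */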

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

struct adreno_reglist {
	u32 offset;
	u32 value;
};

extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[];

struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;  /* GMEM size in bytes */
	enum adreno_quirks quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
	const struct adreno_reglist *hwcg;
};

const struct adreno_info *adreno_info(struct adreno_rev rev);

struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;
	const struct adreno_info *info;
	uint32_t gmem;  /* actual gmem size */
	uint32_t revn;  /* numeric revision name */
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from the legacy path?  Prior to the addition
	 * of gpu firmware to linux-firmware, the fw files were placed in
	 * the toplevel firmware directory, following qcom's android
	 * kernel.  But linux-firmware preferred they be placed in a
	 * 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we first try to load from the new
	 * path, using request_firmware_direct() to avoid any potential
	 * timeout waiting for the usermode helper, then fall back to the
	 * old path (also with direct load), and finally fall back to
	 * request_firmware() with the new path to allow the usermode
	 * helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,       /* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,    /* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;
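
	/*
	 * A minimal sketch of that fallback order (the actual logic lives
	 * in adreno_request_fw(); the path strings and error handling here
	 * are schematic):
	 *
	 *	ret = request_firmware_direct(&fw, "qcom/<fwfile>", dev);
	 *	if (ret)
	 *		ret = request_firmware_direct(&fw, "<fwfile>", dev);
	 *	if (ret)
	 *		ret = request_firmware(&fw, "qcom/<fwfile>", dev);
	 */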

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long base;
	void *hdl;
};

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({                                   \
	int __ret = -ETIMEDOUT;                            \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do {                                               \
		if (X) {                                   \
			__ret = 0;                         \
			break;                             \
		}                                          \
	} while (time_before(jiffies, __t));               \
	__ret;                                             \
})
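
/*
 * Example use (a sketch; REG_EXAMPLE_STATUS is a stand-in register name,
 * gpu_read() is the accessor from msm_gpu.h):
 *
 *	if (spin_until(gpu_read(gpu, REG_EXAMPLE_STATUS) & BIT(0)))
 *		DRM_ERROR("timeout waiting for GPU to idle\n");
 */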

static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
{
	return (gpu->revn < 300);
}

static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
{
	return (gpu->revn < 210);
}

static inline bool adreno_is_a225(struct adreno_gpu *gpu)
{
	return gpu->revn == 225;
}

static inline bool adreno_is_a305(struct adreno_gpu *gpu)
{
	return gpu->revn == 305;
}

static inline bool adreno_is_a306(struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return gpu->revn == 307;
}

static inline bool adreno_is_a320(struct adreno_gpu *gpu)
{
	return gpu->revn == 320;
}

static inline bool adreno_is_a330(struct adreno_gpu *gpu)
{
	return gpu->revn == 330;
}

static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}

static inline bool adreno_is_a405(struct adreno_gpu *gpu)
{
	return gpu->revn == 405;
}

static inline bool adreno_is_a420(struct adreno_gpu *gpu)
{
	return gpu->revn == 420;
}

static inline bool adreno_is_a430(struct adreno_gpu *gpu)
{
	return gpu->revn == 430;
}

static inline bool adreno_is_a510(struct adreno_gpu *gpu)
{
	return gpu->revn == 510;
}

static inline bool adreno_is_a530(struct adreno_gpu *gpu)
{
	return gpu->revn == 530;
}

static inline bool adreno_is_a540(struct adreno_gpu *gpu)
{
	return gpu->revn == 540;
}

static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
{
	return (gpu->revn >= 600) && (gpu->revn < 700);
}

static inline bool adreno_is_a618(struct adreno_gpu *gpu)
{
	return gpu->revn == 618;
}

static inline bool adreno_is_a630(struct adreno_gpu *gpu)
{
	return gpu->revn == 630;
}

static inline bool adreno_is_a640(struct adreno_gpu *gpu)
{
	return gpu->revn == 640;
}

static inline bool adreno_is_a650(struct adreno_gpu *gpu)
{
	return gpu->revn == 650;
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);

/*
 * Common helper function to initialize the default address space for
 * arm-smmu attached targets.
 */
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev);

/*
 * For a5xx and a6xx targets, load the zap shader that is used to pull the
 * GPU out of secure mode.
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt - 1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt - 1) << 16) | ((opcode & 0xFF) << 8));
}
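
/*
 * For example, a sketch of a type-3 packet with a two-dword payload (the
 * opcode is from adreno_pm4.xml.h; the payload layout is illustrative only):
 *
 *	OUT_PKT3(ring, CP_MEM_WRITE, 2);
 *	OUT_RING(ring, gpuaddr);
 *	OUT_RING(ring, data);
 */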

static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}
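
/*
 * (The XOR cascade above folds the 32-bit value down to a 4-bit index;
 * 0x9669 then serves as a 16-entry lookup table mapping that index to the
 * parity bit the CP expects in type-4/type-7 packet headers.)
 */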

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}

static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
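
/*
 * For example, a sketch of a one-dword type-7 packet (the opcode and event
 * value are illustrative of typical usage, not a spec):
 *
 *	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 *	OUT_RING(ring, CACHE_FLUSH_TS);
 */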

struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);

/* current dword offset of the write pointer within the ring: */
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length. If it is not, the
 * hardware will quietly mask off the bits for you and shift the size. For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
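
/*
 * For example (a sketch only; the index and range are illustrative, and
 * REG_CP_PROTECT_REG() stands in for the per-generation register name):
 *
 *	gpu_write(gpu, REG_CP_PROTECT_REG(0), ADRENO_PROTECT_RW(0xcc00, 0x20));
 */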

/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with
 * a single register.
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
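
/*
 * For example, a sketch of polling an illustrative status register for up
 * to 1ms, checking every 100us:
 *
 *	u32 status;
 *	int ret = gpu_poll_timeout(gpu, REG_EXAMPLE_STATUS, status,
 *			status & BIT(0), 100, 1000);
 */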

#endif /* __ADRENO_GPU_H__ */