/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014, 2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
#define REG_SKIP ~0
#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP

/**
 * adreno_regs: List of registers that are used across all
 * 3D devices. Each device type has a different offset value for the same
 * register, so an array of register offsets is declared for every device
 * and is indexed by the enumeration values defined in this enum
 */
enum adreno_regs {
	REG_ADRENO_CP_RB_BASE,
	REG_ADRENO_CP_RB_BASE_HI,
	REG_ADRENO_CP_RB_RPTR_ADDR,
	REG_ADRENO_CP_RB_RPTR_ADDR_HI,
	REG_ADRENO_CP_RB_RPTR,
	REG_ADRENO_CP_RB_WPTR,
	REG_ADRENO_CP_RB_CNTL,
	REG_ADRENO_REGISTER_MAX,
};
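
/*
 * Example (illustrative; the per-target register names are assumptions): a
 * target file such as a4xx_gpu.c exports its offsets with something like:
 *
 *   static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 *       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
 *       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
 *       ...
 *   };
 *
 * REG_ADRENO_DEFINE() stores the real offset + 1 so that a zero entry means
 * "not defined", and REG_ADRENO_SKIP marks registers a target doesn't have.
 */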

enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

enum adreno_quirks {
	ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
	ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
	ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
};

struct adreno_rev {
	uint8_t  core;
	uint8_t  major;
	uint8_t  minor;
	uint8_t  patchid;
};

#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })
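
/*
 * e.g. ADRENO_REV(3, 3, 0, 0) identifies an a330, patch level 0;
 * adreno_info() below looks up the matching catalog entry for a given rev.
 */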

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;
	enum adreno_quirks quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
};

const struct adreno_info *adreno_info(struct adreno_rev rev);

struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;
	const struct adreno_info *info;
	uint32_t gmem;  /* actual gmem size */
	uint32_t revn;  /* numeric revision name */
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from the legacy path?  Prior to the addition
	 * of gpu firmware to linux-firmware, the fw files were
	 * placed in the toplevel firmware directory, following qcom's
	 * android kernel.  But linux-firmware preferred they be
	 * placed in a 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from
	 * the new path, using request_firmware_direct() to avoid
	 * any potential timeout waiting for the usermode helper, then
	 * fall back to the old path (also with direct load).  And
	 * finally fall back to request_firmware() with the new
	 * path to allow the usermode helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,       /* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,    /* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;
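
	/*
	 * Sketch of the probe order implemented by adreno_request_fw()
	 * (not verbatim):
	 *
	 *   request_firmware_direct(&fw, "qcom/<fwname>", dev)  -> FW_LOCATION_NEW
	 *   request_firmware_direct(&fw, "<fwname>", dev)       -> FW_LOCATION_LEGACY
	 *   request_firmware(&fw, "qcom/<fwname>", dev)         -> FW_LOCATION_HELPER
	 */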

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long base;
	void *hdl;
};

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({                                   \
	int __ret = -ETIMEDOUT;                            \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do {                                               \
		if (X) {                                   \
			__ret = 0;                         \
			break;                             \
		}                                          \
	} while (time_before(jiffies, __t));               \
	__ret;                                             \
})
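
/*
 * Example (illustrative; the status register and idle bit are hypothetical):
 *
 *   if (spin_until(!(gpu_read(gpu, REG_XX_STATUS) & BIT(0))))
 *       DRM_ERROR("timed out waiting for GPU to idle\n");
 */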

static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
{
	return (gpu->revn < 300);
}

static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
{
	return (gpu->revn < 210);
}

static inline bool adreno_is_a225(struct adreno_gpu *gpu)
{
	return gpu->revn == 225;
}

static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
	return (gpu->revn >= 300) && (gpu->revn < 400);
}

static inline bool adreno_is_a305(struct adreno_gpu *gpu)
{
	return gpu->revn == 305;
}

static inline bool adreno_is_a306(struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return gpu->revn == 307;
}

static inline bool adreno_is_a320(struct adreno_gpu *gpu)
{
	return gpu->revn == 320;
}

static inline bool adreno_is_a330(struct adreno_gpu *gpu)
{
	return gpu->revn == 330;
}

static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}

static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
{
	return (gpu->revn >= 400) && (gpu->revn < 500);
}

static inline bool adreno_is_a405(struct adreno_gpu *gpu)
{
	return gpu->revn == 405;
}

static inline bool adreno_is_a420(struct adreno_gpu *gpu)
{
	return gpu->revn == 420;
}

static inline bool adreno_is_a430(struct adreno_gpu *gpu)
{
	return gpu->revn == 430;
}

static inline bool adreno_is_a510(struct adreno_gpu *gpu)
{
	return gpu->revn == 510;
}

static inline bool adreno_is_a530(struct adreno_gpu *gpu)
{
	return gpu->revn == 530;
}

static inline bool adreno_is_a540(struct adreno_gpu *gpu)
{
	return gpu->revn == 540;
}

static inline bool adreno_is_a618(struct adreno_gpu *gpu)
{
	return gpu->revn == 618;
}

static inline bool adreno_is_a630(struct adreno_gpu *gpu)
{
	return gpu->revn == 630;
}

static inline bool adreno_is_a640(struct adreno_gpu *gpu)
{
	return gpu->revn == 640;
}

static inline bool adreno_is_a650(struct adreno_gpu *gpu)
{
	return gpu->revn == 650;
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);

/*
 * Common helper function to initialize the default address space for arm-smmu
 * attached targets
 */
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev);

/*
 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
 * out of secure mode
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt - 1) << 16) | (regindx & 0x7FFF));
}
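
/*
 * A type-0 packet writes 'cnt' dwords to consecutive registers starting at
 * 'regindx'. Example (register name hypothetical):
 *
 *   OUT_PKT0(ring, REG_XX_CP_RB_BASE, 1);
 *   OUT_RING(ring, lower_32_bits(iova));
 */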

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt - 1) << 16) | ((opcode & 0xFF) << 8));
}
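
/*
 * A type-3 packet is an opcode plus 'cnt' payload dwords. Example
 * (illustrative payload, sketched from the PM4 CP_MEM_WRITE form):
 *
 *   OUT_PKT3(ring, CP_MEM_WRITE, 2);
 *   OUT_RING(ring, lower_32_bits(addr));
 *   OUT_RING(ring, value);
 */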

static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}
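
/*
 * PM4_PARITY() folds val down to four bits with xor (which preserves the
 * parity of the set bits), then uses 0x9669 as a 16-entry lookup table:
 * bit i of 0x9669 is set exactly when i has an even number of 1 bits, so
 * the result is 1 for even-popcount values of val.
 */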

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}

static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
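
/*
 * Type-4 (register write) and type-7 (opcode) packets replace type-0/type-3
 * on a5xx and later, adding parity bits. Example (opcode payload
 * illustrative, not a verbatim sequence from a target file):
 *
 *   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 *   OUT_RING(ring, CACHE_FLUSH_TS);
 */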

/*
 * adreno_reg_check() - Checks the validity of a register enum
 * @gpu:		Pointer to struct adreno_gpu
 * @offset_name:	The register enum that is checked
 */
static inline bool adreno_reg_check(struct adreno_gpu *gpu,
		enum adreno_regs offset_name)
{
	BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX ||
			!gpu->reg_offsets[offset_name]);

	/*
	 * REG_SKIP is a special value that tells us that the register in
	 * question isn't implemented on the target but shouldn't trigger a
	 * BUG(). This is used to cleanly implement adreno_gpu_write64() and
	 * adreno_gpu_read64() in a generic fashion
	 */
	if (gpu->reg_offsets[offset_name] == REG_SKIP)
		return false;

	return true;
}

static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
		enum adreno_regs offset_name)
{
	u32 reg = gpu->reg_offsets[offset_name];
	u32 val = 0;

	if (adreno_reg_check(gpu, offset_name))
		val = gpu_read(&gpu->base, reg - 1);

	return val;
}

static inline void adreno_gpu_write(struct adreno_gpu *gpu,
		enum adreno_regs offset_name, u32 data)
{
	u32 reg = gpu->reg_offsets[offset_name];

	if (adreno_reg_check(gpu, offset_name))
		gpu_write(&gpu->base, reg - 1, data);
}

struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);

static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
		enum adreno_regs lo, enum adreno_regs hi, u64 data)
{
	adreno_gpu_write(gpu, lo, lower_32_bits(data));
	adreno_gpu_write(gpu, hi, upper_32_bits(data));
}
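
/*
 * e.g. adreno_hw_init() programs the ringbuffer address with:
 *
 *   adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
 *       REG_ADRENO_CP_RB_BASE_HI, rb->iova);
 *
 * On 32-bit targets the _HI slot is REG_ADRENO_SKIP, so adreno_reg_check()
 * quietly drops the upper write.
 */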

static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length. If it is not, the
 * hardware will quietly mask off the bits for you and shift the size. For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with a
 * single register
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
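
/*
 * Example (illustrative; the CP_PROTECT register name varies per target):
 *
 *   gpu_write(gpu, REG_XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x600, 0x100));
 *
 * _len must be a power of two for ilog2() to describe the range exactly, and
 * _reg must be aligned to _len per the caveat above.
 */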

#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
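
/*
 * Example (register and bit hypothetical): poll every 100us, for up to
 * 1000us, until the GPU reports idle:
 *
 *   u32 status;
 *   ret = gpu_poll_timeout(gpu, REG_XX_RBBM_STATUS, status,
 *       !(status & BIT(31)), 100, 1000);
 */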

#endif /* __ADRENO_GPU_H__ */