1 /*
2  * Copyright (C) 2013 Red Hat
3  * Author: Rob Clark <robdclark@gmail.com>
4  *
5  * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License version 2 as published by
9  * the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef __ADRENO_GPU_H__
21 #define __ADRENO_GPU_H__
22 
23 #include <linux/firmware.h>
24 
25 #include "msm_gpu.h"
26 
27 #include "adreno_common.xml.h"
28 #include "adreno_pm4.xml.h"
29 
30 #define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
31 #define REG_SKIP ~0
32 #define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
33 
34 /**
 * adreno_regs: List of registers that are used across all
 * 3D devices. Each device type has a different offset value for the same
 * register, so an array of register offsets is declared for every device
 * and indexed by the enumeration values defined in this enum.
39  */
40 enum adreno_regs {
41 	REG_ADRENO_CP_RB_BASE,
42 	REG_ADRENO_CP_RB_BASE_HI,
43 	REG_ADRENO_CP_RB_RPTR_ADDR,
44 	REG_ADRENO_CP_RB_RPTR_ADDR_HI,
45 	REG_ADRENO_CP_RB_RPTR,
46 	REG_ADRENO_CP_RB_WPTR,
47 	REG_ADRENO_CP_RB_CNTL,
48 	REG_ADRENO_REGISTER_MAX,
49 };
50 
51 enum adreno_quirks {
52 	ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
53 	ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
54 };
55 
56 struct adreno_rev {
57 	uint8_t  core;
58 	uint8_t  major;
59 	uint8_t  minor;
60 	uint8_t  patchid;
61 };
62 
63 #define ADRENO_REV(core, major, minor, patchid) \
64 	((struct adreno_rev){ core, major, minor, patchid })
65 
66 struct adreno_gpu_funcs {
67 	struct msm_gpu_funcs base;
68 	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
69 };
70 
71 struct adreno_info {
72 	struct adreno_rev rev;
73 	uint32_t revn;
74 	const char *name;
75 	const char *pm4fw, *pfpfw;
76 	const char *gpmufw;
77 	uint32_t gmem;
78 	enum adreno_quirks quirks;
79 	struct msm_gpu *(*init)(struct drm_device *dev);
80 	const char *zapfw;
81 };
82 
83 const struct adreno_info *adreno_info(struct adreno_rev rev);
84 
85 struct adreno_gpu {
86 	struct msm_gpu base;
87 	struct adreno_rev rev;
88 	const struct adreno_info *info;
89 	uint32_t gmem;  /* actual gmem size */
90 	uint32_t revn;  /* numeric revision name */
91 	const struct adreno_gpu_funcs *funcs;
92 
93 	/* interesting register offsets to dump: */
94 	const unsigned int *registers;
95 
96 	/*
	 * Are we loading fw from the legacy path?  Prior to the
	 * addition of GPU firmware to linux-firmware, the fw files
	 * were placed in the top-level firmware directory, following
	 * qcom's android kernel.  But linux-firmware preferred they
	 * be placed in a 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we first try to load from the
	 * new path, using request_firmware_direct() to avoid any
	 * potential timeout waiting for the usermode helper, then
	 * fall back to the old path (also with direct load), and
	 * finally fall back to request_firmware() with the new path
	 * to allow the usermode helper.
109 	 */
110 	enum {
111 		FW_LOCATION_UNKNOWN = 0,
112 		FW_LOCATION_NEW,       /* /lib/firmware/qcom/$fwfile */
113 		FW_LOCATION_LEGACY,    /* /lib/firmware/$fwfile */
114 		FW_LOCATION_HELPER,
115 	} fwloc;
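	/*
	 * Illustrative probe order for the scheme described above (a sketch;
	 * "a300_pm4.fw" is just an example filename):
	 *
	 *	request_firmware_direct(&fw, "qcom/a300_pm4.fw", dev)  -> FW_LOCATION_NEW
	 *	request_firmware_direct(&fw, "a300_pm4.fw", dev)       -> FW_LOCATION_LEGACY
	 *	request_firmware(&fw, "qcom/a300_pm4.fw", dev)         -> FW_LOCATION_HELPER
	 *
	 * The first location that works is remembered in fwloc, so any
	 * remaining firmware files are only requested from that location.
	 */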
116 
117 	/* firmware: */
118 	const struct firmware *pm4, *pfp;
119 
120 	/*
121 	 * Register offsets are different between some GPUs.
122 	 * GPU specific offsets will be exported by GPU specific
123 	 * code (a3xx_gpu.c) and stored in this common location.
124 	 */
125 	const unsigned int *reg_offsets;
126 };
127 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
128 
/* platform config data (i.e. from DT, or pdata) */
130 struct adreno_platform_config {
131 	struct adreno_rev rev;
132 	uint32_t fast_rate;
133 };
134 
135 #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
136 
137 #define spin_until(X) ({                                   \
138 	int __ret = -ETIMEDOUT;                            \
139 	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
140 	do {                                               \
141 		if (X) {                                   \
142 			__ret = 0;                         \
143 			break;                             \
144 		}                                          \
145 	} while (time_before(jiffies, __t));               \
146 	__ret;                                             \
147 })
148 
149 
150 static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
151 {
152 	return (gpu->revn >= 300) && (gpu->revn < 400);
153 }
154 
155 static inline bool adreno_is_a305(struct adreno_gpu *gpu)
156 {
157 	return gpu->revn == 305;
158 }
159 
160 static inline bool adreno_is_a306(struct adreno_gpu *gpu)
161 {
162 	/* yes, 307, because a305c is 306 */
163 	return gpu->revn == 307;
164 }
165 
166 static inline bool adreno_is_a320(struct adreno_gpu *gpu)
167 {
168 	return gpu->revn == 320;
169 }
170 
171 static inline bool adreno_is_a330(struct adreno_gpu *gpu)
172 {
173 	return gpu->revn == 330;
174 }
175 
176 static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
177 {
178 	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
179 }
180 
181 static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
182 {
183 	return (gpu->revn >= 400) && (gpu->revn < 500);
184 }
185 
static inline bool adreno_is_a420(struct adreno_gpu *gpu)
187 {
188 	return gpu->revn == 420;
189 }
190 
static inline bool adreno_is_a430(struct adreno_gpu *gpu)
{
	return gpu->revn == 430;
194 }
195 
static inline bool adreno_is_a530(struct adreno_gpu *gpu)
197 {
198 	return gpu->revn == 530;
199 }
200 
201 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
202 const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
203 		const char *fwname);
204 int adreno_hw_init(struct msm_gpu *gpu);
205 void adreno_recover(struct msm_gpu *gpu);
206 void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
207 		struct msm_file_private *ctx);
208 void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
209 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
210 #ifdef CONFIG_DEBUG_FS
211 void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
212 #endif
213 void adreno_dump_info(struct msm_gpu *gpu);
214 void adreno_dump(struct msm_gpu *gpu);
215 void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
216 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
217 
218 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
219 		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
220 		int nr_rings);
221 void adreno_gpu_cleanup(struct adreno_gpu *gpu);
222 
223 
224 /* ringbuffer helpers (the parts that are adreno specific) */
225 
226 static inline void
227 OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
228 {
229 	adreno_wait_ring(ring, cnt+1);
230 	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
231 }
232 
233 /* no-op packet: */
234 static inline void
235 OUT_PKT2(struct msm_ringbuffer *ring)
236 {
237 	adreno_wait_ring(ring, 1);
238 	OUT_RING(ring, CP_TYPE2_PKT);
239 }
240 
241 static inline void
242 OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
243 {
244 	adreno_wait_ring(ring, cnt+1);
245 	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
246 }
247 
/*
 * Type-4 and type-7 packet headers carry parity bits over their count,
 * register and opcode fields.  PM4_PARITY() XOR-folds the value down to a
 * nibble and looks its parity up in the 0x9669 bit table.
 */
static inline u32 PM4_PARITY(u32 val)
249 {
250 	return (0x9669 >> (0xF & (val ^
251 		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
253 		(val >> 28)))) & 1;
254 }
255 
256 /* Maximum number of values that can be executed for one opcode */
257 #define TYPE4_MAX_PAYLOAD 127
258 
259 #define PKT4(_reg, _cnt) \
260 	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
261 	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
262 
263 static inline void
264 OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
265 {
266 	adreno_wait_ring(ring, cnt + 1);
267 	OUT_RING(ring, PKT4(regindx, cnt));
268 }
269 
270 static inline void
271 OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
272 {
273 	adreno_wait_ring(ring, cnt + 1);
274 	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
275 		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
276 }
277 
278 /*
279  * adreno_reg_check() - Checks the validity of a register enum
280  * @gpu:		Pointer to struct adreno_gpu
281  * @offset_name:	The register enum that is checked
282  */
283 static inline bool adreno_reg_check(struct adreno_gpu *gpu,
284 		enum adreno_regs offset_name)
285 {
286 	if (offset_name >= REG_ADRENO_REGISTER_MAX ||
287 			!gpu->reg_offsets[offset_name]) {
288 		BUG();
289 	}
290 
291 	/*
	 * REG_SKIP is a special value that tells us that the register in
	 * question isn't implemented on the target, but shouldn't trigger a
	 * BUG().  This is used to cleanly implement adreno_gpu_write64() and
	 * adreno_gpu_read64() in a generic fashion.
296 	 */
297 	if (gpu->reg_offsets[offset_name] == REG_SKIP)
298 		return false;
299 
300 	return true;
301 }
302 
303 static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
304 		enum adreno_regs offset_name)
305 {
	u32 reg = gpu->reg_offsets[offset_name];
	u32 val = 0;

	if (adreno_reg_check(gpu, offset_name))
		val = gpu_read(&gpu->base, reg - 1);
310 	return val;
311 }
312 
313 static inline void adreno_gpu_write(struct adreno_gpu *gpu,
314 		enum adreno_regs offset_name, u32 data)
315 {
	u32 reg = gpu->reg_offsets[offset_name];

	if (adreno_reg_check(gpu, offset_name))
		gpu_write(&gpu->base, reg - 1, data);
319 }
320 
321 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
322 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
323 struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
324 
325 static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
326 		enum adreno_regs lo, enum adreno_regs hi, u64 data)
327 {
328 	adreno_gpu_write(gpu, lo, lower_32_bits(data));
329 	adreno_gpu_write(gpu, hi, upper_32_bits(data));
330 }
331 
/* Current write pointer, in dwords, relative to the start of the ring: */
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
333 {
334 	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
335 }
336 
337 /*
338  * Given a register and a count, return a value to program into
339  * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
340  * registers starting at _reg.
341  *
342  * The register base needs to be a multiple of the length. If it is not, the
343  * hardware will quietly mask off the bits for you and shift the size. For
344  * example, if you intend the protection to start at 0x07 for a length of 4
345  * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
346  * expose registers you intended to protect!
347  */
348 #define ADRENO_PROTECT_RW(_reg, _len) \
349 	((1 << 30) | (1 << 29) | \
350 	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
351 
352 /*
353  * Same as above, but allow reads over the range. For areas of mixed use (such
354  * as performance counters) this allows us to protect a much larger range with a
355  * single register
356  */
357 #define ADRENO_PROTECT_RDONLY(_reg, _len) \
358 	((1 << 29) \
359 	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
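/*
 * Example (the ranges are illustrative, and REG_CP_PROTECT_REG(n) stands in
 * for the per-GPU register definition): the base must be aligned to the
 * length, so a 4-register read/write block starts on a multiple of 4 and a
 * 64-register read-only block on a multiple of 64:
 *
 *	gpu_write(gpu, REG_CP_PROTECT_REG(0), ADRENO_PROTECT_RW(0x08, 4));
 *	gpu_write(gpu, REG_CP_PROTECT_REG(1), ADRENO_PROTECT_RDONLY(0x100, 64));
 */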
360 
361 #endif /* __ADRENO_GPU_H__ */
362