/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 */
#ifndef __A5XX_GPU_H__
#define __A5XX_GPU_H__

#include "adreno_gpu.h"

/* Bringing over the hack from the previous targets */
#undef ROP_COPY
#undef ROP_XOR

#include "a5xx.xml.h"

struct a5xx_gpu {
	struct adreno_gpu base;

	struct drm_gem_object *pm4_bo;
	uint64_t pm4_iova;

	struct drm_gem_object *pfp_bo;
	uint64_t pfp_iova;

	struct drm_gem_object *gpmu_bo;
	uint64_t gpmu_iova;
	uint32_t gpmu_dwords;

	uint32_t lm_leakage;

	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
	uint32_t last_seqno[MSM_GPU_MAX_RINGS];

	atomic_t preempt_state;
	spinlock_t preempt_start_lock;
	struct timer_list preempt_timer;

	struct drm_gem_object *shadow_bo;
	uint64_t shadow_iova;
	uint32_t *shadow;

	/* True if the microcode supports the WHERE_AM_I opcode */
	bool has_whereami;
};

#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)

#ifdef CONFIG_DEBUG_FS
void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
#endif

/*
 * In order to do lockless preemption we use a simple state machine to progress
 * through the process.
 *
 * PREEMPT_NONE - no preemption in progress. Next state: START.
 * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
 * states: TRIGGERED, NONE.
 * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
 * state: NONE.
 * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
 * states: FAULTED, PENDING.
 * PREEMPT_FAULTED - A preemption timed out (never completed). This will
 * trigger recovery. Next state: N/A.
 * PREEMPT_PENDING - Preemption complete interrupt fired - the callback is
 * checking the success of the operation. Next states: FAULTED, NONE.
 */

enum preempt_state {
	PREEMPT_NONE = 0,
	PREEMPT_START,
	PREEMPT_ABORT,
	PREEMPT_TRIGGERED,
	PREEMPT_FAULTED,
	PREEMPT_PENDING,
};
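
/*
 * A minimal sketch (illustrative only, not a helper this header provides)
 * of how a transition through the states above can stay lockless: the move
 * only succeeds if no other path has advanced the state first.
 */
static inline bool a5xx_try_preempt_state_sketch(struct a5xx_gpu *a5xx_gpu,
		enum preempt_state cur, enum preempt_state next)
{
	/* atomic_cmpxchg() returns the old value; equality means we won */
	return atomic_cmpxchg(&a5xx_gpu->preempt_state, cur, next) == cur;
}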

/*
 * struct a5xx_preempt_record is a shared buffer between the microcode and the
 * CPU to store the state for preemption. The record itself is much larger
 * (64k) but most of that is used by the CP for storage.
 *
 * There is a preemption record assigned per ringbuffer. When the CPU triggers a
 * preemption, it fills out the record with the useful information (wptr, ring
 * base, etc) and the microcode uses that information to set up the CP following
 * the preemption.  When a ring is switched out, the CP will save the ringbuffer
 * state back to the record. In this way, once the records are properly set up
 * the CPU can quickly switch back and forth between ringbuffers by only
 * updating a few registers (often only the wptr).
 *
 * These are the CPU aware registers in the record:
 * @magic: Must always be 0x27C4BAFC
 * @info: Type of the record - written 0 by the CPU, updated by the CP
 * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
 * the CP
 * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
 * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
 * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
 * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
 * @rbase: Value of RB_BASE written by CPU, save/restored by CP
 * @counter: GPU address of the storage area for the performance counters
 */
struct a5xx_preempt_record {
	uint32_t magic;
	uint32_t info;
	uint32_t data;
	uint32_t cntl;
	uint32_t rptr;
	uint32_t wptr;
	uint64_t rptr_addr;
	uint64_t rbase;
	uint64_t counter;
};

/* Magic identifier for the preemption record */
#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL

/*
 * Even though the structure above is only a few bytes, we need a full 64k to
 * store the entire preemption record from the CP
 */
#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)

/*
 * The preemption counter block is a storage area for the value of the
 * preemption counters that are saved immediately before context switch. We
 * append it on to the end of the allocation for the preemption record.
 */
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
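
/*
 * A minimal sketch (illustrative only; this helper does not exist in the
 * driver) of the CPU side of record setup: fill in the static fields once
 * and let the CP maintain the rest across switches. The rb_cntl, rbase,
 * rptr_addr and counters_iova parameters are assumed to come from the ring
 * and buffer setup code.
 */
static inline void a5xx_preempt_record_init_sketch(
		struct a5xx_preempt_record *rec, uint32_t rb_cntl,
		uint64_t rbase, uint64_t rptr_addr, uint64_t counters_iova)
{
	rec->magic = A5XX_PREEMPT_RECORD_MAGIC;
	rec->info = 0;			/* CPU writes 0, CP updates it */
	rec->data = 0;
	rec->cntl = rb_cntl;		/* RB_CNTL value for this ring */
	rec->rptr = 0;
	rec->wptr = 0;
	rec->rptr_addr = rptr_addr;
	rec->rbase = rbase;
	rec->counter = counters_iova;	/* GPU address of the counter block */
}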

int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);

static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
		uint32_t reg, uint32_t mask, uint32_t value)
{
	while (usecs--) {
		udelay(1);
		if ((gpu_read(gpu, reg) & mask) == value)
			return 0;
		cpu_relax();
	}

	return -ETIMEDOUT;
}
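
/*
 * Example usage (the register, mask and timeout here are illustrative,
 * not taken from the driver): wait up to 100us for all status bits to
 * clear, returning -ETIMEDOUT on failure.
 *
 *	if (spin_usecs(gpu, 100, REG_A5XX_RBBM_STATUS, 0xffffffff, 0))
 *		DRM_ERROR("GPU did not go idle\n");
 */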

#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
		((ring)->id * sizeof(uint32_t)))
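
/*
 * With WHERE_AM_I supported, the CP writes each ring's rptr into the shadow
 * buffer at the slot addressed by shadowptr(). A sketch of the CPU-side
 * read (assuming shadow_bo has been allocated and mapped to shadow):
 *
 *	uint32_t rptr = a5xx_gpu->shadow[ring->id];
 */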

bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);

void a5xx_preempt_init(struct msm_gpu *gpu);
void a5xx_preempt_hw_init(struct msm_gpu *gpu);
void a5xx_preempt_trigger(struct msm_gpu *gpu);
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);

void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);

/* Return true if we are in a preempt state */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{
	int preempt_state = atomic_read(&a5xx_gpu->preempt_state);

	return !(preempt_state == PREEMPT_NONE ||
			preempt_state == PREEMPT_ABORT);
}
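
/*
 * Example usage (illustrative; update_wptr() is a hypothetical helper):
 * only poke the hardware wptr when this ring is current and no switch is
 * in flight.
 *
 *	if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
 *		update_wptr(gpu, ring);
 */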

#endif /* __A5XX_GPU_H__ */