/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 */
#ifndef __A5XX_GPU_H__
#define __A5XX_GPU_H__

#include "adreno_gpu.h"

/* Bringing over the hack from the previous targets */
#undef ROP_COPY
#undef ROP_XOR

#include "a5xx.xml.h"

struct a5xx_gpu {
	struct adreno_gpu base;

	struct drm_gem_object *pm4_bo;
	uint64_t pm4_iova;

	struct drm_gem_object *pfp_bo;
	uint64_t pfp_iova;

	struct drm_gem_object *gpmu_bo;
	uint64_t gpmu_iova;
	uint32_t gpmu_dwords;

	uint32_t lm_leakage;

	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];

	atomic_t preempt_state;
	struct timer_list preempt_timer;
};

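/* Upcast from the embedded struct adreno_gpu back to its containing a5xx_gpu */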
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)

#ifdef CONFIG_DEBUG_FS
void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
#endif

/*
 * In order to do lockless preemption we use a simple state machine to progress
 * through the process.
 *
 * PREEMPT_NONE - No preemption in progress. Next state: START.
 * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
 * states: TRIGGERED, NONE.
 * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
 * state: NONE.
 * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
 * states: FAULTED, PENDING.
 * PREEMPT_FAULTED - A preemption timed out (never completed). This will
 * trigger recovery. Next state: N/A.
 * PREEMPT_PENDING - Preemption complete interrupt fired - the callback is
 * checking the success of the operation. Next states: FAULTED, NONE.
 */

enum preempt_state {
	PREEMPT_NONE = 0,
	PREEMPT_START,
	PREEMPT_ABORT,
	PREEMPT_TRIGGERED,
	PREEMPT_FAULTED,
	PREEMPT_PENDING,
};
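
/*
 * A minimal sketch of how a transition in the state machine above can be
 * claimed locklessly (illustration only - the helper name is hypothetical,
 * not a function this header exports): a compare-and-swap on preempt_state
 * ensures only one path wins a given transition, e.g. NONE -> START when a
 * trigger starts evaluating.
 */
static inline bool try_preempt_transition(struct a5xx_gpu *a5xx_gpu,
		enum preempt_state cur, enum preempt_state next)
{
	/* atomic_cmpxchg() returns the old value; we won only if it was cur */
	return atomic_cmpxchg(&a5xx_gpu->preempt_state, cur, next) == cur;
}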

/*
 * struct a5xx_preempt_record is a shared buffer between the microcode and the
 * CPU to store the state for preemption. The record itself is much larger
 * (64k) but most of that is used by the CP for storage.
 *
 * There is a preemption record assigned per ringbuffer. When the CPU triggers a
 * preemption, it fills out the record with the useful information (wptr, ring
 * base, etc) and the microcode uses that information to set up the CP following
 * the preemption. When a ring is switched out, the CP will save the ringbuffer
 * state back to the record. In this way, once the records are properly set up
 * the CPU can quickly switch back and forth between ringbuffers by only
 * updating a few registers (often only the wptr).
 *
 * These are the CPU-aware fields in the record:
 * @magic: Must always be 0x27C4BAFC
 * @info: Type of the record - written 0 by the CPU, updated by the CP
 * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
 * the CP
 * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
 * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
 * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
 * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
 * @rbase: Value of RB_BASE written by CPU, save/restored by CP
 * @counter: GPU address of the storage area for the performance counters
 */
struct a5xx_preempt_record {
	uint32_t magic;
	uint32_t info;
	uint32_t data;
	uint32_t cntl;
	uint32_t rptr;
	uint32_t wptr;
	uint64_t rptr_addr;
	uint64_t rbase;
	uint64_t counter;
};

/* Magic identifier for the preemption record */
#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
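
/*
 * For illustration only (a hedged sketch, not code this driver exports; the
 * helper name and parameter list are hypothetical): before a preemption the
 * CPU seeds a record with the fields described above, and the CP fills in
 * the rest when the ring is switched out.
 */
static inline void a5xx_preempt_record_seed(struct a5xx_preempt_record *record,
		uint32_t cntl, uint64_t rptr_addr, uint64_t rbase,
		uint64_t counter)
{
	record->magic = A5XX_PREEMPT_RECORD_MAGIC;
	record->info = 0;		/* written 0 by CPU, updated by the CP */
	record->data = 0;
	record->cntl = cntl;		/* RB_CNTL value for this ring */
	record->rptr = 0;
	record->wptr = 0;
	record->rptr_addr = rptr_addr;	/* where the CP publishes the rptr */
	record->rbase = rbase;		/* GPU address of the ringbuffer */
	record->counter = counter;	/* GPU address of the counter block */
}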

/*
 * Even though the structure above is only a few bytes, we need a full 64k to
 * store the entire preemption record from the CP.
 */
#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)

/*
 * The preemption counter block is a storage area for the values of the
 * preemption counters that are saved immediately before context switch. We
 * append it to the end of the allocation for the preemption record.
 */
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
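
/*
 * Illustrative only (a hypothetical macro, assuming the append-to-record
 * layout described above): a single per-ring buffer would be sized record
 * plus counters, with the counter block at a fixed offset past the record.
 */
#define A5XX_PREEMPT_TOTAL_SIZE \
	(A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE)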

int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);

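/*
 * Poll @reg for up to @usecs microseconds, waiting for the bits selected by
 * @mask to read back as @value. Returns 0 once the condition is met, or
 * -ETIMEDOUT if it never was.
 */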
static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
		uint32_t reg, uint32_t mask, uint32_t value)
{
	while (usecs--) {
		udelay(1);
		if ((gpu_read(gpu, reg) & mask) == value)
			return 0;
		cpu_relax();
	}

	return -ETIMEDOUT;
}

bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);

void a5xx_preempt_init(struct msm_gpu *gpu);
void a5xx_preempt_hw_init(struct msm_gpu *gpu);
void a5xx_preempt_trigger(struct msm_gpu *gpu);
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);

/* Return true if a preemption is in flight (any state other than NONE/ABORT) */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{
	int preempt_state = atomic_read(&a5xx_gpu->preempt_state);

	return !(preempt_state == PREEMPT_NONE ||
			preempt_state == PREEMPT_ABORT);
}

#endif /* __A5XX_GPU_H__ */