1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2016 The Linux Foundation. All rights reserved.
3  */
4 
5 #include <linux/pm_opp.h>
6 #include "a5xx_gpu.h"
7 
/*
 * The GPMU data block is a block of shared registers that can be used to
 * communicate back and forth. These "registers" are defined by convention
 * with the GPMU firmware and are not bound to any specific hardware design
 */
13 
14 #define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
15 #define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
16 #define AGC_MSG_BASE (AGC_INIT_BASE + 7)
17 
18 #define AGC_MSG_STATE (AGC_MSG_BASE + 0)
19 #define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
20 #define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
21 #define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
22 
23 #define AGC_POWER_CONFIG_PRODUCTION_ID 1
24 #define AGC_INIT_MSG_VALUE 0xBABEFACE
25 
/*
 * Limits-management sequence table for the A530: each entry is a register
 * offset and the value to program before the GPMU takes over. The values
 * are opaque, vendor-provided magic numbers. The table is read-only data,
 * so declare it const to keep it out of writable memory.
 */
static const struct {
	uint32_t reg;
	uint32_t value;
} a5xx_sequence_regs[] = {
	{ 0xB9A1, 0x00010303 },
	{ 0xB9A2, 0x13000000 },
	{ 0xB9A3, 0x00460020 },
	{ 0xB9A4, 0x10000000 },
	{ 0xB9A5, 0x040A1707 },
	{ 0xB9A6, 0x00010000 },
	{ 0xB9A7, 0x0E000904 },
	{ 0xB9A8, 0x10000000 },
	{ 0xB9A9, 0x01165000 },
	{ 0xB9AA, 0x000E0002 },
	{ 0xB9AB, 0x03884141 },
	{ 0xB9AC, 0x10000840 },
	{ 0xB9AD, 0x572A5000 },
	{ 0xB9AE, 0x00000003 },
	{ 0xB9AF, 0x00000000 },
	{ 0xB9B0, 0x10000000 },
	{ 0xB828, 0x6C204010 },
	{ 0xB829, 0x6C204011 },
	{ 0xB82A, 0x6C204012 },
	{ 0xB82B, 0x6C204013 },
	{ 0xB82C, 0x6C204014 },
	{ 0xB90F, 0x00000004 },
	{ 0xB910, 0x00000002 },
	{ 0xB911, 0x00000002 },
	{ 0xB912, 0x00000002 },
	{ 0xB913, 0x00000002 },
	{ 0xB92F, 0x00000004 },
	{ 0xB930, 0x00000005 },
	{ 0xB931, 0x00000005 },
	{ 0xB932, 0x00000005 },
	{ 0xB933, 0x00000005 },
	{ 0xB96F, 0x00000001 },
	{ 0xB970, 0x00000003 },
	{ 0xB94F, 0x00000004 },
	{ 0xB950, 0x0000000B },
	{ 0xB951, 0x0000000B },
	{ 0xB952, 0x0000000B },
	{ 0xB953, 0x0000000B },
	{ 0xB907, 0x00000019 },
	{ 0xB927, 0x00000019 },
	{ 0xB947, 0x00000019 },
	{ 0xB967, 0x00000019 },
	{ 0xB987, 0x00000019 },
	{ 0xB906, 0x00220001 },
	{ 0xB926, 0x00220001 },
	{ 0xB946, 0x00220001 },
	{ 0xB966, 0x00220001 },
	{ 0xB986, 0x00300000 },
	{ 0xAC40, 0x0340FF41 },
	{ 0xAC41, 0x03BEFED0 },
	{ 0xAC42, 0x00331FED },
	{ 0xAC43, 0x021FFDD3 },
	{ 0xAC44, 0x5555AAAA },
	{ 0xAC45, 0x5555AAAA },
	{ 0xB9BA, 0x00000008 },
};
86 
87 /*
88  * Get the actual voltage value for the operating point at the specified
89  * frequency
90  */
91 static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
92 {
93 	struct drm_device *dev = gpu->dev;
94 	struct msm_drm_private *priv = dev->dev_private;
95 	struct platform_device *pdev = priv->gpu_pdev;
96 	struct dev_pm_opp *opp;
97 	u32 ret = 0;
98 
99 	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
100 
101 	if (!IS_ERR(opp)) {
102 		ret = dev_pm_opp_get_voltage(opp) / 1000;
103 		dev_pm_opp_put(opp);
104 	}
105 
106 	return ret;
107 }
108 
/*
 * Setup thermal limit management
 *
 * Programs the limits-management sequence registers and then posts the
 * initial AGC power-configuration message into the GPMU shared data RAM.
 * All thresholds and power numbers below are hard-coded for the A530.
 */
static void a5xx_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	unsigned int i;

	/* Write the block of sequence registers */
	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
		gpu_write(gpu, a5xx_sequence_regs[i].reg,
			a5xx_sequence_regs[i].value);

	/* Hard code the A530 GPU thermal sensor ID for the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);

	/* Until we get clock scaling 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);

	/* The threshold is fixed at 6000 for A530 */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);

	/*
	 * NOTE(review): BEC_ENABLE and GDPM_CONFIG1 are written twice - here
	 * and again just below (0x00201FF1 and 0x201FF1 are the same value).
	 * Presumably a deliberate hardware sequence; confirm against the
	 * downstream driver before deduplicating.
	 */
	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);

	/* Write the voltage table */
	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);

	/* Mark the message busy and set the command ID */
	gpu_write(gpu, AGC_MSG_STATE, 1);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	/* Write the max power - hard coded to 5448 for A530 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	/*
	 * For now just write the one voltage level - we will do more when we
	 * can do scaling
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	/*
	 * Payload is 4 dwords (in bytes); the magic value presumably signals
	 * the GPMU firmware that the message is ready to consume.
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}
158 
159 /* Enable SP/TP cpower collapse */
160 static void a5xx_pc_init(struct msm_gpu *gpu)
161 {
162 	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
163 	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
164 	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
165 	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
166 }
167 
/*
 * Enable the GPMU microcontroller
 *
 * Submits the microcode previously staged by a5xx_gpmu_ucode_init() through
 * ring 0 as an indirect buffer, then writes 0 to the CM3 SYSRESET register
 * and polls for the firmware's handshake value.
 *
 * Returns 0 on success and also when no microcode is available (the GPU
 * runs without the GPMU); -EINVAL only if the GPU fails to idle after the
 * microcode load.
 */
static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	/* Nothing was staged by a5xx_gpmu_ucode_init() - nothing to load */
	if (!a5xx_gpu->gpmu_dwords)
		return 0;

	/* Turn off protected mode for this operation */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Kick off the IB to load the GPMU microcode */
	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, a5xx_gpu->gpmu_dwords);

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	gpu->funcs->flush(gpu, ring);

	/* The load must complete before the GPMU is released from reset */
	if (!a5xx_idle(gpu, ring)) {
		DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
			gpu->name);
		return -EINVAL;
	}

	gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);

	/* Kick off the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);

	/*
	 * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
	 * won't have advanced power collapse.
	 */
	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
		0xBABEFACE))
		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
			gpu->name);

	return 0;
}
216 
217 /* Enable limits management */
218 static void a5xx_lm_enable(struct msm_gpu *gpu)
219 {
220 	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
221 	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
222 	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
223 	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
224 	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
225 
226 	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
227 }
228 
/*
 * Bring up the A5XX power-management features: limits management, SP/TP
 * power collapse, and the GPMU microcontroller. Returns 0 on success or
 * the error from the GPMU bring-up.
 */
int a5xx_power_init(struct msm_gpu *gpu)
{
	int ret;

	/* Program the limits-management configuration */
	a5xx_lm_setup(gpu);

	/* Configure SP/TP power collapse */
	a5xx_pc_init(gpu);

	/* Boot the GPMU, then switch on limits management only if it came up */
	ret = a5xx_gpmu_init(gpu);
	if (!ret)
		a5xx_lm_enable(gpu);

	return ret;
}
249 
/*
 * Parse the GPMU firmware image and stage its command stream into a GPU
 * buffer object so a5xx_gpmu_init() can execute it as an indirect buffer.
 * On any validation failure the function returns silently; gpmu_dwords
 * stays 0 and the GPMU is simply left disabled.
 */
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct drm_device *drm = gpu->dev;
	uint32_t dwords = 0, offset = 0, bosize;
	unsigned int *data, *ptr, *cmds;
	unsigned int cmds_size;

	/* Already staged on a previous init - nothing to do */
	if (a5xx_gpu->gpmu_bo)
		return;

	data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;

	/*
	 * The first dword is the size of the remaining data in dwords. Use it
	 * as a checksum of sorts and make sure it matches the actual size of
	 * the firmware that we read
	 */

	if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
		(data[0] < 2) || (data[0] >=
			(adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
		return;

	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
	if (data[1] != 2)
		return;

	/*
	 * data[2] appears to be a dword offset from the header to the start
	 * of the command stream. NOTE(review): data[2] is not bounds-checked
	 * against data[0], so a malformed image could make the unsigned
	 * cmds_size subtraction wrap to a huge value - confirm the firmware
	 * is trusted before relying on this.
	 */
	cmds = data + data[2] + 3;
	cmds_size = data[0] - data[2] - 2;

	/*
	 * A single type4 opcode can only have so many values attached so
	 * add enough opcodes to load all the commands
	 */
	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

	/* GPU-read-only buffer to hold the packetized microcode */
	ptr = msm_gem_kernel_new_locked(drm, bosize,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
	if (IS_ERR(ptr))
		return;

	msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");

	/* Split the stream into TYPE4 packets of at most TYPE4_MAX_PAYLOAD */
	while (cmds_size > 0) {
		int i;
		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
			TYPE4_MAX_PAYLOAD : cmds_size;

		/* Each packet targets the next slice of GPMU instruction RAM */
		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
			_size);

		for (i = 0; i < _size; i++)
			ptr[dwords++] = *cmds++;

		offset += _size;
		cmds_size -= _size;
	}

	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
	/* A non-zero dword count signals a5xx_gpmu_init() that ucode is ready */
	a5xx_gpu->gpmu_dwords = dwords;
}
314