/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/pm_opp.h>
#include "a5xx_gpu.h"

/*
 * The GPMU data block is a block of shared registers that can be used to
 * communicate back and forth. These "registers" are defined by convention
 * with the GPMU firmware and are not bound to any specific hardware design.
 */

#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
#define AGC_MSG_BASE (AGC_INIT_BASE + 7)

#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))

#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE

static struct {
	uint32_t reg;
	uint32_t value;
} a5xx_sequence_regs[] = {
	{ 0xB9A1, 0x00010303 },
	{ 0xB9A2, 0x13000000 },
	{ 0xB9A3, 0x00460020 },
	{ 0xB9A4, 0x10000000 },
	{ 0xB9A5, 0x040A1707 },
	{ 0xB9A6, 0x00010000 },
	{ 0xB9A7, 0x0E000904 },
	{ 0xB9A8, 0x10000000 },
	{ 0xB9A9, 0x01165000 },
	{ 0xB9AA, 0x000E0002 },
	{ 0xB9AB, 0x03884141 },
	{ 0xB9AC, 0x10000840 },
	{ 0xB9AD, 0x572A5000 },
	{ 0xB9AE, 0x00000003 },
	{ 0xB9AF, 0x00000000 },
	{ 0xB9B0, 0x10000000 },
	{ 0xB828, 0x6C204010 },
	{ 0xB829, 0x6C204011 },
	{ 0xB82A, 0x6C204012 },
	{ 0xB82B, 0x6C204013 },
	{ 0xB82C, 0x6C204014 },
	{ 0xB90F, 0x00000004 },
	{ 0xB910, 0x00000002 },
	{ 0xB911, 0x00000002 },
	{ 0xB912, 0x00000002 },
	{ 0xB913, 0x00000002 },
	{ 0xB92F, 0x00000004 },
	{ 0xB930, 0x00000005 },
	{ 0xB931, 0x00000005 },
	{ 0xB932, 0x00000005 },
	{ 0xB933, 0x00000005 },
	{ 0xB96F, 0x00000001 },
	{ 0xB970, 0x00000003 },
	{ 0xB94F, 0x00000004 },
	{ 0xB950, 0x0000000B },
	{ 0xB951, 0x0000000B },
	{ 0xB952, 0x0000000B },
	{ 0xB953, 0x0000000B },
	{ 0xB907, 0x00000019 },
	{ 0xB927, 0x00000019 },
	{ 0xB947, 0x00000019 },
	{ 0xB967, 0x00000019 },
	{ 0xB987, 0x00000019 },
	{ 0xB906, 0x00220001 },
	{ 0xB926, 0x00220001 },
	{ 0xB946, 0x00220001 },
	{ 0xB966, 0x00220001 },
	{ 0xB986, 0x00300000 },
	{ 0xAC40, 0x0340FF41 },
	{ 0xAC41, 0x03BEFED0 },
	{ 0xAC42, 0x00331FED },
	{ 0xAC43, 0x021FFDD3 },
	{ 0xAC44, 0x5555AAAA },
	{ 0xAC45, 0x5555AAAA },
	{ 0xB9BA, 0x00000008 },
};

/*
 * Get the actual voltage value for the operating point at the specified
 * frequency
 */
static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);

	return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
}

/* Setup thermal limit management */
static void a5xx_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	unsigned int i;

	/* Write the block of sequence registers */
	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
		gpu_write(gpu, a5xx_sequence_regs[i].reg,
			a5xx_sequence_regs[i].value);

	/* Hard code the A530 GPU thermal sensor ID for the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);

	/* Until we get clock scaling 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);

	/* The threshold is fixed at 6000 for A530 */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);

	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);

	/* Write the voltage table */
	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);

	gpu_write(gpu, AGC_MSG_STATE, 1);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	/* Write the max power - hard coded to 5448 for A530 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	/*
	 * For now just write the one voltage level - we will do more when we
	 * can do scaling
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
}

/* Enable the GPMU microcontroller */
static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	if (!a5xx_gpu->gpmu_dwords)
		return 0;

	/* Turn off protected mode for this operation */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Kick off the IB to load the GPMU microcode */
	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, a5xx_gpu->gpmu_dwords);

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	gpu->funcs->flush(gpu, ring);

	if (!a5xx_idle(gpu, ring)) {
		DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
			gpu->name);
		return -EINVAL;
	}

	gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);

	/* Kick off the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);

	/*
	 * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
	 * won't have advanced power collapse.
	 */
	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
		0xBABEFACE))
		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
			gpu->name);

	return 0;
}

/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);

	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
}

int a5xx_power_init(struct msm_gpu *gpu)
{
	int ret;

	/* Set up the limits management */
	a5xx_lm_setup(gpu);

	/* Set up SP/TP power collapse */
	a5xx_pc_init(gpu);

	/* Start the GPMU */
	ret = a5xx_gpmu_init(gpu);
	if (ret)
		return ret;

	/* Start the limits management */
	a5xx_lm_enable(gpu);

	return 0;
}

void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct drm_device *drm = gpu->dev;
	const struct firmware *fw;
	uint32_t dwords = 0, offset = 0, bosize;
	unsigned int *data, *ptr, *cmds;
	unsigned int cmds_size;

	if (a5xx_gpu->gpmu_bo)
		return;

	/* Get the firmware */
	fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw);
	if (IS_ERR(fw)) {
		DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
			gpu->name);
		return;
	}

	data = (unsigned int *) fw->data;

	/*
	 * The first dword is the size of the remaining data in dwords. Use it
	 * as a checksum of sorts and make sure it matches the actual size of
	 * the firmware that we read
	 */

	if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
		goto out;

	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
	if (data[1] != 2)
		goto out;

	cmds = data + data[2] + 3;
	cmds_size = data[0] - data[2] - 2;

	/*
	 * A single type4 opcode can only have so many values attached so
	 * add enough opcodes to load all the commands
	 */
	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

	ptr = msm_gem_kernel_new_locked(drm, bosize,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
	if (IS_ERR(ptr))
		goto err;

	while (cmds_size > 0) {
		int i;
		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
			TYPE4_MAX_PAYLOAD : cmds_size;

		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
			_size);

		for (i = 0; i < _size; i++)
			ptr[dwords++] = *cmds++;

		offset += _size;
		cmds_size -= _size;
	}

	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
	a5xx_gpu->gpmu_dwords = dwords;

	goto out;

err:
	if (a5xx_gpu->gpmu_iova)
		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
	if (a5xx_gpu->gpmu_bo)
		drm_gem_object_unreference(a5xx_gpu->gpmu_bo);

	a5xx_gpu->gpmu_bo = NULL;
	a5xx_gpu->gpmu_iova = 0;
	a5xx_gpu->gpmu_dwords = 0;

out:
	/* No need to keep that firmware lying around anymore */
	release_firmware(fw);
}