/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"

#define VCE_V1_0_FW_SIZE	(256 * 1024)
#define VCE_V1_0_STACK_SIZE	(64 * 1024)
#define VCE_V1_0_DATA_SIZE	(7808 * (RADEON_MAX_VCE_HANDLES + 1))

struct vce_v1_0_fw_signature
{
	int32_t off;
	uint32_t len;
	int32_t num;
	struct {
		uint32_t chip_id;
		uint32_t keyselect;
		uint32_t nonce[4];
		uint32_t sigval[4];
	} val[8];
};

/**
 * vce_v1_0_get_rptr - get read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware read pointer
 */
uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
		return RREG32(VCE_RB_RPTR);
	else
		return RREG32(VCE_RB_RPTR2);
}

/**
 * vce_v1_0_get_wptr - get write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware write pointer
 */
uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
		return RREG32(VCE_RB_WPTR);
	else
		return RREG32(VCE_RB_WPTR2);
}

/**
 * vce_v1_0_set_wptr - set write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Commits the write pointer to the hardware
 */
void vce_v1_0_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
		WREG32(VCE_RB_WPTR, ring->wptr);
	else
		WREG32(VCE_RB_WPTR2, ring->wptr);
}

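/**
 * vce_v1_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @rdev: radeon_device pointer
 * @enable: true to enable dynamic clock gating, false to force the clocks on
 *
 * Toggles VCE medium grain clock gating; gating is only enabled when the
 * ASIC advertises RADEON_CG_SUPPORT_VCE_MGCG.
 */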
void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable)
{
	u32 tmp;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
		tmp = RREG32(VCE_CLOCK_GATING_A);
		tmp |= CGC_DYN_CLOCK_MODE;
		WREG32(VCE_CLOCK_GATING_A, tmp);

		tmp = RREG32(VCE_UENC_CLOCK_GATING);
		tmp &= ~0x1ff000;
		tmp |= 0xff800000;
		WREG32(VCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3ff;
		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
	} else {
		tmp = RREG32(VCE_CLOCK_GATING_A);
		tmp &= ~CGC_DYN_CLOCK_MODE;
		WREG32(VCE_CLOCK_GATING_A, tmp);

		tmp = RREG32(VCE_UENC_CLOCK_GATING);
		tmp |= 0x1ff000;
		tmp &= ~0xff800000;
		WREG32(VCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3ff;
		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
	}
}

/* Static clock gating setup, applied once the VCPU firmware is up */
static void vce_v1_0_init_cg(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(VCE_CLOCK_GATING_A);
	tmp |= CGC_DYN_CLOCK_MODE;
	WREG32(VCE_CLOCK_GATING_A, tmp);

	tmp = RREG32(VCE_CLOCK_GATING_B);
	tmp |= 0x1e;
	tmp &= ~0xe100e1;
	WREG32(VCE_CLOCK_GATING_B, tmp);

	tmp = RREG32(VCE_UENC_CLOCK_GATING);
	tmp &= ~0xff9ff000;
	WREG32(VCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3ff;
	WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
}

/*
 * Copy the firmware into the VCE BO. The raw image starts with a
 * vce_v1_0_fw_signature header; the nonce, length and signature values
 * matching this chip are placed before and after the firmware body, and
 * the key select from the header is saved for vce_v1_0_resume().
 */
int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
{
	struct vce_v1_0_fw_signature *sign = (void *)rdev->vce_fw->data;
	uint32_t chip_id;
	int i;

	switch (rdev->family) {
	case CHIP_TAHITI:
		chip_id = 0x01000014;
		break;
	case CHIP_VERDE:
		chip_id = 0x01000015;
		break;
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
		chip_id = 0x01000016;
		break;
	case CHIP_ARUBA:
		chip_id = 0x01000017;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(sign->num); ++i) {
		if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
			break;
	}

	if (i == le32_to_cpu(sign->num))
		return -EINVAL;

	data += (256 - 64) / 4;
	data[0] = sign->val[i].nonce[0];
	data[1] = sign->val[i].nonce[1];
	data[2] = sign->val[i].nonce[2];
	data[3] = sign->val[i].nonce[3];
	data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);

	memset(&data[5], 0, 44);
	memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));

	data += (le32_to_cpu(sign->len) + 64) / 4;
	data[0] = sign->val[i].sigval[0];
	data[1] = sign->val[i].sigval[1];
	data[2] = sign->val[i].sigval[2];
	data[3] = sign->val[i].sigval[3];

	rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);

	return 0;
}

/* Size of the BO that holds the firmware image, stack and per-handle data */
unsigned vce_v1_0_bo_size(struct radeon_device *rdev)
{
	WARN_ON(VCE_V1_0_FW_SIZE < rdev->vce_fw->size);
	return VCE_V1_0_FW_SIZE + VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE;
}

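/**
 * vce_v1_0_resume - program the VCPU memory ranges and boot the firmware
 *
 * @rdev: radeon_device pointer
 *
 * Programs the firmware, stack and data ranges of the VCE BO into the VCPU
 * caches, writes the key select picked in vce_v1_0_load_fw() and polls
 * VCE_FW_REG_STATUS until the firmware reports done and passed (or times
 * out), before applying the initial clock gating setup.
 */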
int vce_v1_0_resume(struct radeon_device *rdev)
{
	uint64_t addr = rdev->vce.gpu_addr;
	uint32_t size;
	int i;

	WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(VCE_CLOCK_GATING_B, 0);

	WREG32_P(VCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);

	WREG32(VCE_LMI_CTRL, 0x00398000);
	WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(VCE_LMI_SWAP_CNTL, 0);
	WREG32(VCE_LMI_SWAP_CNTL1, 0);
	WREG32(VCE_LMI_VM_CTRL, 0);

	WREG32(VCE_VCPU_SCRATCH7, RADEON_MAX_VCE_HANDLES);

	addr += 256;
	size = VCE_V1_0_FW_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = VCE_V1_0_STACK_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = VCE_V1_0_DATA_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32(VCE_LMI_FW_START_KEYSEL, rdev->vce.keyselect);

	for (i = 0; i < 10; ++i) {
		mdelay(10);
		if (RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_DONE)
			break;
	}

	if (i == 10)
		return -ETIMEDOUT;

	if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_PASS))
		return -EINVAL;

	for (i = 0; i < 10; ++i) {
		mdelay(10);
		if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_BUSY))
			break;
	}

	if (i == 10)
		return -ETIMEDOUT;

	vce_v1_0_init_cg(rdev);

	return 0;
}

/**
 * vce_v1_0_start - start VCE block
 *
 * @rdev: radeon_device pointer
 *
 * Setup and start the VCE block
 */
int vce_v1_0_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int i, j, r;

	/* set BUSY flag */
	WREG32_P(VCE_STATUS, 1, ~1);

	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
	WREG32(VCE_RB_RPTR, ring->wptr);
	WREG32(VCE_RB_WPTR, ring->wptr);
	WREG32(VCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(VCE_RB_SIZE, ring->ring_size / 4);

	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
	WREG32(VCE_RB_RPTR2, ring->wptr);
	WREG32(VCE_RB_WPTR2, ring->wptr);
	WREG32(VCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(VCE_RB_SIZE2, ring->ring_size / 4);

	WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN);

	WREG32_P(VCE_SOFT_RESET,
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET, ~(
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET));

	mdelay(100);

	WREG32_P(VCE_SOFT_RESET, 0, ~(
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET));

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(VCE_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(VCE_SOFT_RESET, VCE_ECPU_SOFT_RESET, ~VCE_ECPU_SOFT_RESET);
		mdelay(10);
		WREG32_P(VCE_SOFT_RESET, 0, ~VCE_ECPU_SOFT_RESET);
		mdelay(10);
		r = -1;
	}

	/* clear BUSY flag */
	WREG32_P(VCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

/**
 * vce_v1_0_init - initialize and test the VCE rings
 *
 * @rdev: radeon_device pointer
 *
 * Start the VCE block and run a ring test on both VCE rings.
 */
int vce_v1_0_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	r = vce_v1_0_start(rdev);
	if (r)
		return r;

	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
	ring->ready = true;
	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
	ring->ready = true;
	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}