/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

extern bool snapshot_debugbus;
extern bool allow_vram_carveout;

enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

#define ADRENO_QUIRK_TWO_PASS_USE_WFI	BIT(0)
#define ADRENO_QUIRK_FAULT_DETECT_MASK	BIT(1)
#define ADRENO_QUIRK_LMLOADKILL_DISABLE	BIT(2)

struct adreno_rev {
	uint8_t core;
	uint8_t major;
	uint8_t minor;
	uint8_t patchid;
};

#define ANY_ID 0xff

#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

struct adreno_reglist {
	u32 offset;
	u32 value;
};

extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
extern const struct adreno_reglist a660_hwcg[], a690_hwcg[];

struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;
	u64 quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
	const struct adreno_reglist *hwcg;
	u64 address_space_size;
};

const struct adreno_info *adreno_info(struct adreno_rev rev);

struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;
	const struct adreno_info *info;
	uint16_t speedbin;
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from the legacy path?  Prior to the addition
	 * of gpu firmware to linux-firmware, the fw files were placed in
	 * the toplevel firmware directory, following qcom's android
	 * kernel.  But linux-firmware preferred they be placed in a
	 * 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from the new
	 * path, using request_firmware_direct() to avoid any potential
	 * timeout waiting for the usermode helper, then fall back to the
	 * old path (also with a direct load), and finally fall back to
	 * request_firmware() with the new path to allow the usermode
	 * helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,	/* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,	/* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
	bool gmu_is_wrapper;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
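/*
 * Illustrative sketch of the fwloc fallback order described above -- the
 * driver's real implementation is adreno_request_fw() in adreno_gpu.c (its
 * prototype appears further down in this header).  The helper name below
 * and the NULL-on-failure convention are assumptions made for the example
 * only.
 */
static inline const struct firmware *
example_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
	struct device *dev = adreno_gpu->base.dev->dev;
	const struct firmware *fw = NULL;
	char *newname;

	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
	if (!newname)
		return NULL;

	/* 1) new path, direct load (no usermode-helper timeout): */
	if (!request_firmware_direct(&fw, newname, dev))
		adreno_gpu->fwloc = FW_LOCATION_NEW;
	/* 2) legacy toplevel path, still a direct load: */
	else if (!request_firmware_direct(&fw, fwname, dev))
		adreno_gpu->fwloc = FW_LOCATION_LEGACY;
	/* 3) new path again, this time allowing the usermode helper: */
	else if (!request_firmware(&fw, newname, dev))
		adreno_gpu->fwloc = FW_LOCATION_HELPER;
	else
		fw = NULL;

	kfree(newname);
	return fw;
}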
struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long base;
	void *hdl;
};

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({                                   \
	int __ret = -ETIMEDOUT;                            \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do {                                               \
		if (X) {                                   \
			__ret = 0;                         \
			break;                             \
		}                                          \
	} while (time_before(jiffies, __t));               \
	__ret;                                             \
})

bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
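/*
 * Conceptual sketch of how adreno_cmp_rev() handles ANY_ID -- the real
 * comparison is implemented elsewhere in the driver (adreno_device.c); the
 * example_*() names here are hypothetical.  The point: wildcards are only
 * honored in the fields of the *first* argument, which is why the helpers
 * further down pass the wildcarded revision first.
 */
static inline bool example_field_match(uint8_t pattern, uint8_t id)
{
	/* ANY_ID (0xff) in the pattern matches any value */
	return (pattern == ANY_ID) || (pattern == id);
}

static inline bool example_cmp_rev(struct adreno_rev pattern,
		struct adreno_rev rev)
{
	return example_field_match(pattern.core, rev.core) &&
		example_field_match(pattern.major, rev.major) &&
		example_field_match(pattern.minor, rev.minor) &&
		example_field_match(pattern.patchid, rev.patchid);
}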
static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->revn == revn;
}

static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
{
	return gpu->gmu_is_wrapper;
}

static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return (gpu->info->revn < 300);
}

static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return (gpu->info->revn < 210);
}

static inline bool adreno_is_a225(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 225);
}

static inline bool adreno_is_a305(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 305);
}

static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return adreno_is_revn(gpu, 307);
}

static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 320);
}

static inline bool adreno_is_a330(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 330);
}

static inline bool adreno_is_a330v2(const struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}

static inline int adreno_is_a405(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 405);
}

static inline int adreno_is_a420(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 420);
}

static inline int adreno_is_a430(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 430);
}

static inline int adreno_is_a506(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 506);
}

static inline int adreno_is_a508(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 508);
}

static inline int adreno_is_a509(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 509);
}

static inline int adreno_is_a510(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 510);
}

static inline int adreno_is_a512(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 512);
}

static inline int adreno_is_a530(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 530);
}

static inline int adreno_is_a540(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 540);
}

static inline int adreno_is_a610(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 610);
}

static inline int adreno_is_a618(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 618);
}

static inline int adreno_is_a619(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 619);
}

static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
{
	return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}

static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 630);
}

static inline int adreno_is_a640(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 640);
}

static inline int adreno_is_a650(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 650);
}

static inline int adreno_is_7c3(const struct adreno_gpu *gpu)
{
	/* The order of args is important here to handle ANY_ID correctly */
	return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
}

static inline int adreno_is_a660(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 660);
}

static inline int adreno_is_a680(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 680);
}

static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
	/* The order of args is important here to handle ANY_ID correctly */
	return adreno_cmp_rev(ADRENO_REV(6, 9, 0, ANY_ID), gpu->rev);
}

/* check for a615, a616, a618, a619 or any derivatives */
static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 615) ||
		adreno_is_revn(gpu, 616) ||
		adreno_is_revn(gpu, 618) ||
		adreno_is_revn(gpu, 619);
}

static inline int adreno_is_a660_family(const struct adreno_gpu *gpu)
{
	return adreno_is_a660(gpu) || adreno_is_a690(gpu) || adreno_is_7c3(gpu);
}

/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 650) ||
		adreno_is_revn(gpu, 620) ||
		adreno_is_a660_family(gpu);
}

static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
{
	return adreno_is_a640(gpu) || adreno_is_a680(gpu);
}

u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t *value, uint32_t *len);
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
void adreno_show_object(struct drm_printer *p, void **ptr, int len,
		bool *encoded);

/*
 * Common helper function to initialize the default address space for
 * arm-smmu attached targets
 */
struct msm_gem_address_space *
adreno_create_address_space(struct msm_gpu *gpu,
			    struct platform_device *pdev);

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
				  struct platform_device *pdev,
				  unsigned long quirks);

int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
			 struct adreno_smmu_fault_info *info, const char *block,
			 u32 scratch[4]);

int adreno_read_speedbin(struct device *dev, u32 *speedbin);

/*
 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
 * out of secure mode
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}

static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}

static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
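/*
 * Usage sketch (illustrative only, never called by the driver): emitting a
 * type-7 event-write packet followed by a type-4 register write.  The
 * register offset in the OUT_PKT4() example is a placeholder value.
 */
static inline void example_emit_packets(struct msm_ringbuffer *ring)
{
	/* type-7 packet: one payload dword carrying the event id */
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CACHE_FLUSH_TS);

	/* type-4 packet: write a single value to a (placeholder) register */
	OUT_PKT4(ring, 0x123, 1);
	OUT_RING(ring, 0x0);
}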
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);

static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length.  If it is not, the
 * hardware will quietly mask off the bits for you and shift the size.  For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07), which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

/*
 * Same as above, but allow reads over the range.  For areas of mixed use
 * (such as performance counters) this allows us to protect a much larger
 * range with a single register.
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
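/*
 * Usage sketch (illustrative): programming one entry of the CP protect
 * table.  The register offset here is a placeholder; each generation's
 * setup code (e.g. a5xx_gpu.c, a6xx_gpu.c) writes its own table with its
 * own register names.
 */
static inline void example_protect_entry(struct msm_gpu *gpu)
{
	/*
	 * Block reads and writes of 16 registers starting at 0x880 (note
	 * the base is a multiple of the length, as required above):
	 */
	gpu_write(gpu, 0x123 /* placeholder for REG_CP_PROTECT_REG(0) */,
		  ADRENO_PROTECT_RW(0x880, 16));
}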

#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)

#endif /* __ADRENO_GPU_H__ */