/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

extern bool snapshot_debugbus;
extern bool allow_vram_carveout;

enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

#define ADRENO_QUIRK_TWO_PASS_USE_WFI		BIT(0)
#define ADRENO_QUIRK_FAULT_DETECT_MASK		BIT(1)
#define ADRENO_QUIRK_LMLOADKILL_DISABLE		BIT(2)
#define ADRENO_QUIRK_HAS_HW_APRIV		BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT	BIT(4)

struct adreno_rev {
	uint8_t core;
	uint8_t major;
	uint8_t minor;
	uint8_t patchid;
};

#define ANY_ID 0xff

#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

struct adreno_reglist {
	u32 offset;
	u32 value;
};

extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
extern const struct adreno_reglist a660_hwcg[], a690_hwcg[];

struct adreno_speedbin {
	uint16_t fuse;
	uint16_t speedbin;
};

struct adreno_info {
	const char *machine;
	struct adreno_rev rev;
	uint32_t revn;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;
	u64 quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
	const struct adreno_reglist *hwcg;
	u64 address_space_size;
	/**
	 * @speedbins: Optional table of fuse to speedbin mappings
	 *
	 * Consists of pairs of fuse, index mappings, terminated with
	 * {SHRT_MAX, 0} sentinel.
	 */
	struct adreno_speedbin *speedbins;
};

/*
 * Helper to build a speedbin table, ie. the table:
 *      fuse | speedbin
 *      -----+---------
 *         0 | 0
 *       169 | 1
 *       174 | 2
 *
 * would be declared as:
 *
 *      .speedbins = ADRENO_SPEEDBINS(
 *                      { 0,   0 },
 *                      { 169, 1 },
 *                      { 174, 2 },
 *      ),
 */
#define ADRENO_SPEEDBINS(tbl...) (struct adreno_speedbin[]) { tbl {SHRT_MAX, 0} }
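/*
 * Illustrative sketch (fuse_to_speedbin() is hypothetical, not part of
 * the driver API): a table built with ADRENO_SPEEDBINS() can be walked
 * by relying on the {SHRT_MAX, 0} sentinel to terminate the loop.  Note
 * that the macro depends on the trailing comma after the last entry so
 * that "tbl {SHRT_MAX, 0}" expands to a valid initializer list:
 *
 *	static uint16_t fuse_to_speedbin(const struct adreno_info *info,
 *					 uint16_t fuse)
 *	{
 *		const struct adreno_speedbin *sb;
 *
 *		for (sb = info->speedbins; sb->fuse != SHRT_MAX; sb++)
 *			if (sb->fuse == fuse)
 *				return sb->speedbin;
 *		return 0;
 *	}
 */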
const struct adreno_info *adreno_info(struct adreno_rev rev);

struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;
	const struct adreno_info *info;
	uint16_t speedbin;
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from legacy path?  Prior to addition
	 * of gpu firmware to linux-firmware, the fw files were
	 * placed in the toplevel firmware directory, following
	 * qcom's android kernel.  But linux-firmware preferred
	 * they be placed in a 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from
	 * the new path, using request_firmware_direct() to avoid
	 * any potential timeout waiting for usermode helper, then
	 * fall back to the old path (with direct load).  And
	 * finally fall back to request_firmware() with the new
	 * path to allow the usermode helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,     /* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,  /* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
	bool gmu_is_wrapper;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long base;
	void *hdl;
};

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({                                   \
	int __ret = -ETIMEDOUT;                            \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do {                                               \
		if (X) {                                   \
			__ret = 0;                         \
			break;                             \
		}                                          \
	} while (time_before(jiffies, __t));               \
	__ret;                                             \
})
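/*
 * Usage sketch (REG_SOME_STATUS and STATUS_IDLE are illustrative names,
 * not real registers): busy-wait until a condition becomes true, giving
 * up with -ETIMEDOUT after ADRENO_IDLE_TIMEOUT:
 *
 *	if (spin_until(gpu_read(gpu, REG_SOME_STATUS) & STATUS_IDLE))
 *		DRM_DEV_ERROR(dev, "timeout waiting for GPU to idle\n");
 */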
bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);

static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->revn == revn;
}

static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
{
	return gpu->gmu_is_wrapper;
}

static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return (gpu->info->revn < 300);
}

static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return (gpu->info->revn < 210);
}

static inline bool adreno_is_a225(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 225);
}

static inline bool adreno_is_a305(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 305);
}

static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return adreno_is_revn(gpu, 307);
}

static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 320);
}

static inline bool adreno_is_a330(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 330);
}

static inline bool adreno_is_a330v2(const struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}

static inline int adreno_is_a405(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 405);
}

static inline int adreno_is_a420(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 420);
}

static inline int adreno_is_a430(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 430);
}

static inline int adreno_is_a506(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 506);
}

static inline int adreno_is_a508(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 508);
}

static inline int adreno_is_a509(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 509);
}

static inline int adreno_is_a510(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 510);
}

static inline int adreno_is_a512(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 512);
}

static inline int adreno_is_a530(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 530);
}

static inline int adreno_is_a540(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 540);
}

static inline int adreno_is_a610(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 610);
}

static inline int adreno_is_a618(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 618);
}

static inline int adreno_is_a619(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 619);
}

static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
{
	return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}

static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 630);
}

static inline int adreno_is_a640(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 640);
}

static inline int adreno_is_a650(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 650);
}

static inline int adreno_is_7c3(const struct adreno_gpu *gpu)
{
	/* The order of args is important here to handle ANY_ID correctly */
	return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
}
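/*
 * Informational sketch on why the argument order matters above, based on
 * adreno_cmp_rev() honoring ANY_ID wildcards only in its first argument:
 * the pattern revision must come first, the concrete gpu->rev second.
 * Roughly:
 *
 *	adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev)
 *		-> matches an a635 with any patchid
 *	adreno_cmp_rev(gpu->rev, ADRENO_REV(6, 3, 5, ANY_ID))
 *		-> would only match if gpu->rev.patchid were literally 0xff
 */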
static inline int adreno_is_a660(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 660);
}

static inline int adreno_is_a680(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 680);
}

static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
	/* The order of args is important here to handle ANY_ID correctly */
	return adreno_cmp_rev(ADRENO_REV(6, 9, 0, ANY_ID), gpu->rev);
}

/* check for a615, a616, a618, a619 or any a630 derivatives */
static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 630) ||
		adreno_is_revn(gpu, 615) ||
		adreno_is_revn(gpu, 616) ||
		adreno_is_revn(gpu, 618) ||
		adreno_is_revn(gpu, 619);
}

static inline int adreno_is_a660_family(const struct adreno_gpu *gpu)
{
	return adreno_is_a660(gpu) || adreno_is_a690(gpu) || adreno_is_7c3(gpu);
}

/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 650) ||
		adreno_is_revn(gpu, 620) ||
		adreno_is_a660_family(gpu);
}

static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
{
	return adreno_is_a640(gpu) || adreno_is_a680(gpu);
}

u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t *value, uint32_t *len);
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
void adreno_show_object(struct drm_printer *p, void **ptr, int len,
		bool *encoded);

/*
 * Common helper function to initialize the default address space for arm-smmu
 * attached targets
 */
struct msm_gem_address_space *
adreno_create_address_space(struct msm_gpu *gpu,
			    struct platform_device *pdev);

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
				  struct platform_device *pdev,
				  unsigned long quirks);

int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
			 struct adreno_smmu_fault_info *info, const char *block,
			 u32 scratch[4]);

int adreno_read_speedbin(struct device *dev, u32 *speedbin);

/*
 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
 * out of secure mode
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}

/*
 * Compute the parity bit for type-4/type-7 packet headers: XOR-fold the
 * eight nibbles of val together, then look the 4-bit result up in the
 * 16-entry table 0x9669 (bit n is set iff n has an even number of bits
 * set), yielding 1 when val has an even number of set bits.
 */
static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}
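/*
 * Usage sketch (register/opcode names are illustrative, not tied to a
 * particular GPU generation): a type-4 packet programs "cnt" payload
 * dwords into consecutive registers starting at "regindx", while a
 * type-7 packet (below) carries a PM4 opcode plus its payload:
 *
 *	OUT_PKT4(ring, REG_SOME_REGISTER, 1);
 *	OUT_RING(ring, value);
 *
 *	OUT_PKT7(ring, CP_SOME_OPCODE, 1);
 *	OUT_RING(ring, payload);
 */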
static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}

struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);

/* current position of the CP write pointer, in dwords from ring start */
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length. If it is not, the
 * hardware will quietly mask off the bits for you and shift the size. For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with
 * a single register.
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

/* "addr" is a dword register offset; << 2 converts it to a byte offset */
#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)

#endif /* __ADRENO_GPU_H__ */