1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * Copyright (C) 2013 Red Hat 4 * Author: Rob Clark <robdclark@gmail.com> 5 */ 6 7 #ifndef __MSM_GPU_H__ 8 #define __MSM_GPU_H__ 9 10 #include <linux/adreno-smmu-priv.h> 11 #include <linux/clk.h> 12 #include <linux/devfreq.h> 13 #include <linux/interconnect.h> 14 #include <linux/pm_opp.h> 15 #include <linux/regulator/consumer.h> 16 17 #include "msm_drv.h" 18 #include "msm_fence.h" 19 #include "msm_ringbuffer.h" 20 #include "msm_gem.h" 21 22 struct msm_gem_submit; 23 struct msm_gpu_perfcntr; 24 struct msm_gpu_state; 25 struct msm_file_private; 26 27 struct msm_gpu_config { 28 const char *ioname; 29 unsigned int nr_rings; 30 }; 31 32 /* So far, with hardware that I've seen to date, we can have: 33 * + zero, one, or two z180 2d cores 34 * + a3xx or a2xx 3d core, which share a common CP (the firmware 35 * for the CP seems to implement some different PM4 packet types 36 * but the basics of cmdstream submission are the same) 37 * 38 * Which means that the eventual complete "class" hierarchy, once 39 * support for all past and present hw is in place, becomes: 40 * + msm_gpu 41 * + adreno_gpu 42 * + a3xx_gpu 43 * + a2xx_gpu 44 * + z180_gpu 45 */ 46 struct msm_gpu_funcs { 47 int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx, 48 uint32_t param, uint64_t *value, uint32_t *len); 49 int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx, 50 uint32_t param, uint64_t value, uint32_t len); 51 int (*hw_init)(struct msm_gpu *gpu); 52 int (*pm_suspend)(struct msm_gpu *gpu); 53 int (*pm_resume)(struct msm_gpu *gpu); 54 void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit); 55 void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); 56 irqreturn_t (*irq)(struct msm_gpu *irq); 57 struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); 58 void (*recover)(struct msm_gpu *gpu); 59 void (*destroy)(struct msm_gpu *gpu); 60 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) 61 /* show GPU status in debugfs: */ 62 void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, 63 struct drm_printer *p); 64 /* for generation specific debugfs: */ 65 void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); 66 #endif 67 /* note: gpu_busy() can assume that we have been pm_resumed */ 68 u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate); 69 struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu); 70 int (*gpu_state_put)(struct msm_gpu_state *state); 71 unsigned long (*gpu_get_freq)(struct msm_gpu *gpu); 72 /* note: gpu_set_freq() can assume that we have been pm_resumed */ 73 void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp, 74 bool suspended); 75 struct msm_gem_address_space *(*create_address_space) 76 (struct msm_gpu *gpu, struct platform_device *pdev); 77 struct msm_gem_address_space *(*create_private_address_space) 78 (struct msm_gpu *gpu); 79 uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); 80 }; 81 82 /* Additional state for iommu faults: */ 83 struct msm_gpu_fault_info { 84 u64 ttbr0; 85 unsigned long iova; 86 int flags; 87 const char *type; 88 const char *block; 89 }; 90 91 /** 92 * struct msm_gpu_devfreq - devfreq related state 93 */ 94 struct msm_gpu_devfreq { 95 /** devfreq: devfreq instance */ 96 struct devfreq *devfreq; 97 98 /** lock: lock for "suspended", "busy_cycles", and "time" */ 99 struct mutex lock; 100 101 /** 102 * idle_constraint: 103 * 104 * A PM QoS constraint to limit max freq while the GPU is idle. 105 */ 106 struct dev_pm_qos_request idle_freq; 107 108 /** 109 * boost_constraint: 110 * 111 * A PM QoS constraint to boost min freq for a period of time 112 * until the boost expires. 113 */ 114 struct dev_pm_qos_request boost_freq; 115 116 /** 117 * busy_cycles: Last busy counter value, for calculating elapsed busy 118 * cycles since last sampling period. 119 */ 120 u64 busy_cycles; 121 122 /** time: Time of last sampling period. */ 123 ktime_t time; 124 125 /** idle_time: Time of last transition to idle: */ 126 ktime_t idle_time; 127 128 struct devfreq_dev_status average_status; 129 130 /** 131 * idle_work: 132 * 133 * Used to delay clamping to idle freq on active->idle transition. 134 */ 135 struct msm_hrtimer_work idle_work; 136 137 /** 138 * boost_work: 139 * 140 * Used to reset the boost_constraint after the boost period has 141 * elapsed 142 */ 143 struct msm_hrtimer_work boost_work; 144 145 /** suspended: tracks if we're suspended */ 146 bool suspended; 147 }; 148 149 struct msm_gpu { 150 const char *name; 151 struct drm_device *dev; 152 struct platform_device *pdev; 153 const struct msm_gpu_funcs *funcs; 154 155 struct adreno_smmu_priv adreno_smmu; 156 157 /* performance counters (hw & sw): */ 158 spinlock_t perf_lock; 159 bool perfcntr_active; 160 struct { 161 bool active; 162 ktime_t time; 163 } last_sample; 164 uint32_t totaltime, activetime; /* sw counters */ 165 uint32_t last_cntrs[5]; /* hw counters */ 166 const struct msm_gpu_perfcntr *perfcntrs; 167 uint32_t num_perfcntrs; 168 169 struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS]; 170 int nr_rings; 171 172 /** 173 * sysprof_active: 174 * 175 * The count of contexts that have enabled system profiling. 176 */ 177 refcount_t sysprof_active; 178 179 /** 180 * cur_ctx_seqno: 181 * 182 * The ctx->seqno value of the last context to submit rendering, 183 * and the one with current pgtables installed (for generations 184 * that support per-context pgtables). Tracked by seqno rather 185 * than pointer value to avoid dangling pointers, and cases where 186 * a ctx can be freed and a new one created with the same address. 187 */ 188 int cur_ctx_seqno; 189 190 /* 191 * List of GEM active objects on this gpu. Protected by 192 * msm_drm_private::mm_lock 193 */ 194 struct list_head active_list; 195 196 /** 197 * lock: 198 * 199 * General lock for serializing all the gpu things. 200 * 201 * TODO move to per-ring locking where feasible (ie. submit/retire 202 * path, etc) 203 */ 204 struct mutex lock; 205 206 /** 207 * active_submits: 208 * 209 * The number of submitted but not yet retired submits, used to 210 * determine transitions between active and idle. 211 * 212 * Protected by active_lock 213 */ 214 int active_submits; 215 216 /** lock: protects active_submits and idle/active transitions */ 217 struct mutex active_lock; 218 219 /* does gpu need hw_init? */ 220 bool needs_hw_init; 221 222 /** 223 * global_faults: number of GPU hangs not attributed to a particular 224 * address space 225 */ 226 int global_faults; 227 228 void __iomem *mmio; 229 int irq; 230 231 struct msm_gem_address_space *aspace; 232 233 /* Power Control: */ 234 struct regulator *gpu_reg, *gpu_cx; 235 struct clk_bulk_data *grp_clks; 236 int nr_clocks; 237 struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk; 238 uint32_t fast_rate; 239 240 /* Hang and Inactivity Detection: 241 */ 242 #define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */ 243 244 #define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */ 245 struct timer_list hangcheck_timer; 246 247 /* Fault info for most recent iova fault: */ 248 struct msm_gpu_fault_info fault_info; 249 250 /* work for handling GPU ioval faults: */ 251 struct kthread_work fault_work; 252 253 /* work for handling GPU recovery: */ 254 struct kthread_work recover_work; 255 256 /** retire_event: notified when submits are retired: */ 257 wait_queue_head_t retire_event; 258 259 /* work for handling active-list retiring: */ 260 struct kthread_work retire_work; 261 262 /* worker for retire/recover: */ 263 struct kthread_worker *worker; 264 265 struct drm_gem_object *memptrs_bo; 266 267 struct msm_gpu_devfreq devfreq; 268 269 uint32_t suspend_count; 270 271 struct msm_gpu_state *crashstate; 272 273 /* Enable clamping to idle freq when inactive: */ 274 bool clamp_to_idle; 275 276 /* True if the hardware supports expanded apriv (a650 and newer) */ 277 bool hw_apriv; 278 279 struct thermal_cooling_device *cooling; 280 }; 281 282 static inline struct msm_gpu *dev_to_gpu(struct device *dev) 283 { 284 struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev); 285 return container_of(adreno_smmu, struct msm_gpu, adreno_smmu); 286 } 287 288 /* It turns out that all targets use the same ringbuffer size */ 289 #define MSM_GPU_RINGBUFFER_SZ SZ_32K 290 #define MSM_GPU_RINGBUFFER_BLKSIZE 32 291 292 #define MSM_GPU_RB_CNTL_DEFAULT \ 293 (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \ 294 AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8))) 295 296 static inline bool msm_gpu_active(struct msm_gpu *gpu) 297 { 298 int i; 299 300 for (i = 0; i < gpu->nr_rings; i++) { 301 struct msm_ringbuffer *ring = gpu->rb[i]; 302 303 if (fence_after(ring->fctx->last_fence, ring->memptrs->fence)) 304 return true; 305 } 306 307 return false; 308 } 309 310 /* Perf-Counters: 311 * The select_reg and select_val are just there for the benefit of the child 312 * class that actually enables the perf counter.. but msm_gpu base class 313 * will handle sampling/displaying the counters. 314 */ 315 316 struct msm_gpu_perfcntr { 317 uint32_t select_reg; 318 uint32_t sample_reg; 319 uint32_t select_val; 320 const char *name; 321 }; 322 323 /* 324 * The number of priority levels provided by drm gpu scheduler. The 325 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some 326 * cases, so we don't use it (no need for kernel generated jobs). 327 */ 328 #define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN) 329 330 /** 331 * struct msm_file_private - per-drm_file context 332 * 333 * @queuelock: synchronizes access to submitqueues list 334 * @submitqueues: list of &msm_gpu_submitqueue created by userspace 335 * @queueid: counter incremented each time a submitqueue is created, 336 * used to assign &msm_gpu_submitqueue.id 337 * @aspace: the per-process GPU address-space 338 * @ref: reference count 339 * @seqno: unique per process seqno 340 */ 341 struct msm_file_private { 342 rwlock_t queuelock; 343 struct list_head submitqueues; 344 int queueid; 345 struct msm_gem_address_space *aspace; 346 struct kref ref; 347 int seqno; 348 349 /** 350 * sysprof: 351 * 352 * The value of MSM_PARAM_SYSPROF set by userspace. This is 353 * intended to be used by system profiling tools like Mesa's 354 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN. 355 * 356 * Setting a value of 1 will preserve performance counters across 357 * context switches. Setting a value of 2 will in addition 358 * suppress suspend. (Performance counters lose state across 359 * power collapse, which is undesirable for profiling in some 360 * cases.) 361 * 362 * The value automatically reverts to zero when the drm device 363 * file is closed. 364 */ 365 int sysprof; 366 367 /** comm: Overridden task comm, see MSM_PARAM_COMM */ 368 char *comm; 369 370 /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */ 371 char *cmdline; 372 373 /** 374 * elapsed: 375 * 376 * The total (cumulative) elapsed time GPU was busy with rendering 377 * from this context in ns. 378 */ 379 uint64_t elapsed_ns; 380 381 /** 382 * cycles: 383 * 384 * The total (cumulative) GPU cycles elapsed attributed to this 385 * context. 386 */ 387 uint64_t cycles; 388 389 /** 390 * entities: 391 * 392 * Table of per-priority-level sched entities used by submitqueues 393 * associated with this &drm_file. Because some userspace apps 394 * make assumptions about rendering from multiple gl contexts 395 * (of the same priority) within the process happening in FIFO 396 * order without requiring any fencing beyond MakeCurrent(), we 397 * create at most one &drm_sched_entity per-process per-priority- 398 * level. 399 */ 400 struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS]; 401 }; 402 403 /** 404 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority 405 * 406 * @gpu: the gpu instance 407 * @prio: the userspace priority level 408 * @ring_nr: [out] the ringbuffer the userspace priority maps to 409 * @sched_prio: [out] the gpu scheduler priority level which the userspace 410 * priority maps to 411 * 412 * With drm/scheduler providing it's own level of prioritization, our total 413 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES). 414 * Each ring is associated with it's own scheduler instance. However, our 415 * UABI is that lower numerical values are higher priority. So mapping the 416 * single userspace priority level into ring_nr and sched_prio takes some 417 * care. The userspace provided priority (when a submitqueue is created) 418 * is mapped to ring nr and scheduler priority as such: 419 * 420 * ring_nr = userspace_prio / NR_SCHED_PRIORITIES 421 * sched_prio = NR_SCHED_PRIORITIES - 422 * (userspace_prio % NR_SCHED_PRIORITIES) - 1 423 * 424 * This allows generations without preemption (nr_rings==1) to have some 425 * amount of prioritization, and provides more priority levels for gens 426 * that do have preemption. 427 */ 428 static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, 429 unsigned *ring_nr, enum drm_sched_priority *sched_prio) 430 { 431 unsigned rn, sp; 432 433 rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp); 434 435 /* invert sched priority to map to higher-numeric-is-higher- 436 * priority convention 437 */ 438 sp = NR_SCHED_PRIORITIES - sp - 1; 439 440 if (rn >= gpu->nr_rings) 441 return -EINVAL; 442 443 *ring_nr = rn; 444 *sched_prio = sp; 445 446 return 0; 447 } 448 449 /** 450 * struct msm_gpu_submitqueues - Userspace created context. 451 * 452 * A submitqueue is associated with a gl context or vk queue (or equiv) 453 * in userspace. 454 * 455 * @id: userspace id for the submitqueue, unique within the drm_file 456 * @flags: userspace flags for the submitqueue, specified at creation 457 * (currently unusued) 458 * @ring_nr: the ringbuffer used by this submitqueue, which is determined 459 * by the submitqueue's priority 460 * @faults: the number of GPU hangs associated with this submitqueue 461 * @last_fence: the sequence number of the last allocated fence (for error 462 * checking) 463 * @ctx: the per-drm_file context associated with the submitqueue (ie. 464 * which set of pgtables do submits jobs associated with the 465 * submitqueue use) 466 * @node: node in the context's list of submitqueues 467 * @fence_idr: maps fence-id to dma_fence for userspace visible fence 468 * seqno, protected by submitqueue lock 469 * @lock: submitqueue lock 470 * @ref: reference count 471 * @entity: the submit job-queue 472 */ 473 struct msm_gpu_submitqueue { 474 int id; 475 u32 flags; 476 u32 ring_nr; 477 int faults; 478 uint32_t last_fence; 479 struct msm_file_private *ctx; 480 struct list_head node; 481 struct idr fence_idr; 482 struct mutex lock; 483 struct kref ref; 484 struct drm_sched_entity *entity; 485 }; 486 487 struct msm_gpu_state_bo { 488 u64 iova; 489 size_t size; 490 void *data; 491 bool encoded; 492 char name[32]; 493 }; 494 495 struct msm_gpu_state { 496 struct kref ref; 497 struct timespec64 time; 498 499 struct { 500 u64 iova; 501 u32 fence; 502 u32 seqno; 503 u32 rptr; 504 u32 wptr; 505 void *data; 506 int data_size; 507 bool encoded; 508 } ring[MSM_GPU_MAX_RINGS]; 509 510 int nr_registers; 511 u32 *registers; 512 513 u32 rbbm_status; 514 515 char *comm; 516 char *cmd; 517 518 struct msm_gpu_fault_info fault_info; 519 520 int nr_bos; 521 struct msm_gpu_state_bo *bos; 522 }; 523 524 static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) 525 { 526 msm_writel(data, gpu->mmio + (reg << 2)); 527 } 528 529 static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg) 530 { 531 return msm_readl(gpu->mmio + (reg << 2)); 532 } 533 534 static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or) 535 { 536 msm_rmw(gpu->mmio + (reg << 2), mask, or); 537 } 538 539 static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi) 540 { 541 u64 val; 542 543 /* 544 * Why not a readq here? Two reasons: 1) many of the LO registers are 545 * not quad word aligned and 2) the GPU hardware designers have a bit 546 * of a history of putting registers where they fit, especially in 547 * spins. The longer a GPU family goes the higher the chance that 548 * we'll get burned. We could do a series of validity checks if we 549 * wanted to, but really is a readq() that much better? Nah. 550 */ 551 552 /* 553 * For some lo/hi registers (like perfcounters), the hi value is latched 554 * when the lo is read, so make sure to read the lo first to trigger 555 * that 556 */ 557 val = (u64) msm_readl(gpu->mmio + (lo << 2)); 558 val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32); 559 560 return val; 561 } 562 563 static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val) 564 { 565 /* Why not a writeq here? Read the screed above */ 566 msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2)); 567 msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2)); 568 } 569 570 int msm_gpu_pm_suspend(struct msm_gpu *gpu); 571 int msm_gpu_pm_resume(struct msm_gpu *gpu); 572 573 void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx, 574 struct drm_printer *p); 575 576 int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); 577 struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, 578 u32 id); 579 int msm_submitqueue_create(struct drm_device *drm, 580 struct msm_file_private *ctx, 581 u32 prio, u32 flags, u32 *id); 582 int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx, 583 struct drm_msm_submitqueue_query *args); 584 int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); 585 void msm_submitqueue_close(struct msm_file_private *ctx); 586 587 void msm_submitqueue_destroy(struct kref *kref); 588 589 int msm_file_private_set_sysprof(struct msm_file_private *ctx, 590 struct msm_gpu *gpu, int sysprof); 591 void __msm_file_private_destroy(struct kref *kref); 592 593 static inline void msm_file_private_put(struct msm_file_private *ctx) 594 { 595 kref_put(&ctx->ref, __msm_file_private_destroy); 596 } 597 598 static inline struct msm_file_private *msm_file_private_get( 599 struct msm_file_private *ctx) 600 { 601 kref_get(&ctx->ref); 602 return ctx; 603 } 604 605 void msm_devfreq_init(struct msm_gpu *gpu); 606 void msm_devfreq_cleanup(struct msm_gpu *gpu); 607 void msm_devfreq_resume(struct msm_gpu *gpu); 608 void msm_devfreq_suspend(struct msm_gpu *gpu); 609 void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor); 610 void msm_devfreq_active(struct msm_gpu *gpu); 611 void msm_devfreq_idle(struct msm_gpu *gpu); 612 613 int msm_gpu_hw_init(struct msm_gpu *gpu); 614 615 void msm_gpu_perfcntr_start(struct msm_gpu *gpu); 616 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu); 617 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, 618 uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs); 619 620 void msm_gpu_retire(struct msm_gpu *gpu); 621 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit); 622 623 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, 624 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, 625 const char *name, struct msm_gpu_config *config); 626 627 struct msm_gem_address_space * 628 msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task); 629 630 void msm_gpu_cleanup(struct msm_gpu *gpu); 631 632 struct msm_gpu *adreno_load_gpu(struct drm_device *dev); 633 void __init adreno_register(void); 634 void __exit adreno_unregister(void); 635 636 static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue) 637 { 638 if (queue) 639 kref_put(&queue->ref, msm_submitqueue_destroy); 640 } 641 642 static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu) 643 { 644 struct msm_gpu_state *state = NULL; 645 646 mutex_lock(&gpu->lock); 647 648 if (gpu->crashstate) { 649 kref_get(&gpu->crashstate->ref); 650 state = gpu->crashstate; 651 } 652 653 mutex_unlock(&gpu->lock); 654 655 return state; 656 } 657 658 static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu) 659 { 660 mutex_lock(&gpu->lock); 661 662 if (gpu->crashstate) { 663 if (gpu->funcs->gpu_state_put(gpu->crashstate)) 664 gpu->crashstate = NULL; 665 } 666 667 mutex_unlock(&gpu->lock); 668 } 669 670 /* 671 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can 672 * support expanded privileges 673 */ 674 #define check_apriv(gpu, flags) \ 675 (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags)) 676 677 678 #endif /* __MSM_GPU_H__ */ 679