/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};

/* With the hardware I've seen to date, we can have:
 * + zero, one, or two z180 2d cores
 * + a3xx or a2xx 3d core, which share a common CP (the firmware
 *   for the CP seems to implement some different PM4 packet types
 *   but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 * + msm_gpu
 *   + adreno_gpu
 *     + a3xx_gpu
 *     + a2xx_gpu
 *   + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/* number of GPU hangs (for all contexts) */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* The gfx-mem interconnect path that's used by all GPU types. */
	struct icc_path *icc_path;

	/*
	 * Second interconnect path for some A3xx and all A4xx GPUs to the
	 * On Chip MEMory (OCMEM).
	 */
	struct icc_path *ocmem_icc_path;

	/* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct {
		struct devfreq *devfreq;
		u64 busy_cycles;
		ktime_t time;
	} devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
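/*
 * Worked example of the default value above, using only the sizes from
 * the two macros: BUFSZ = ilog2(SZ_32K / 8) = ilog2(4096) = 12, and
 * BLKSZ = ilog2(32 / 8) = ilog2(4) = 2.
 */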
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (ring->seqno > ring->memptrs->fence)
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the
 * child class that actually enables the perf counter..  but the msm_gpu
 * base class will handle sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
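/*
 * For illustration only (the register and countable names below are
 * hypothetical, not taken from any real generation): a child class
 * might describe one counter as
 *
 *	static const struct msm_gpu_perfcntr perfcntrs[] = {
 *		{ REG_AXX_PERFCNTR_SELECT, REG_AXX_PERFCNTR_SAMPLE,
 *		  COUNTABLE_ALU_ACTIVE, "alu" },
 *	};
 *
 * write select_val to select_reg when enabling the counter, and point
 * gpu->perfcntrs / gpu->num_perfcntrs at the table so the base class
 * can sample and display sample_reg on its own.
 */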
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 prio;
	int faults;
	struct msm_file_private *ctx;
	struct list_head node;
	struct kref ref;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here?  Two reasons: 1) many of the LO registers
	 * are not quad word aligned and 2) the GPU hardware designers have
	 * a bit of a history of putting registers where they fit,
	 * especially in spins.  The longer a GPU family goes the higher
	 * the chance that we'll get burned.  We could do a series of
	 * validity checks if we wanted to, but really is a readq() that
	 * much better?  Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is
	 * latched when the lo is read, so make sure to read the lo first
	 * to trigger that.
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here?  Read the screed above. */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}
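/*
 * A minimal usage sketch (register names hypothetical): a 64-bit
 * counter split across a LO/HI register pair would be sampled as
 *
 *	u64 cycles = gpu_read64(gpu, REG_AXX_CNTR_LO, REG_AXX_CNTR_HI);
 *
 * gpu_read64() reads the LO word first so that, on parts which latch
 * HI on a LO read, both halves come from the same instant.
 */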
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
void msm_gpu_resume_devfreq(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->dev->struct_mutex);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->dev->struct_mutex);
}

/*
 * Simple macro to semi-cleanly add the MSM_BO_MAP_PRIV flag for targets
 * that can support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))

#endif /* __MSM_GPU_H__ */