/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
struct msm_file_private;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};

/* So far, with the hardware I've seen, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *    + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t *value, uint32_t *len);
	int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t value, uint32_t len);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
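
/*
 * For illustration: a per-generation backend fills in the ops above
 * with its own handlers and keeps a static table.  The axxx_* names
 * below are hypothetical stand-ins (only msm_gpu_pm_suspend/resume
 * are declared in this header):
 *
 *	static const struct msm_gpu_funcs funcs = {
 *		.get_param = axxx_get_param,
 *		.hw_init = axxx_hw_init,
 *		.pm_suspend = msm_gpu_pm_suspend,
 *		.pm_resume = msm_gpu_pm_resume,
 *		.submit = axxx_submit,
 *		.flush = axxx_flush,
 *		.irq = axxx_irq,
 *		.active_ring = axxx_active_ring,
 *		.recover = axxx_recover,
 *		.destroy = axxx_destroy,
 *	};
 */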

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/**
	 * idle_freq:
	 *
	 * A PM QoS constraint to limit max freq while the GPU is idle.
	 */
	struct dev_pm_qos_request idle_freq;

	/**
	 * boost_freq:
	 *
	 * A PM QoS constraint to boost min freq for a period of time
	 * until the boost expires.
	 */
	struct dev_pm_qos_request boost_freq;

	/**
	 * busy_cycles:
	 *
	 * Used by the implementation of gpu->gpu_busy() to track the last
	 * busy counter value, for calculating elapsed busy cycles since
	 * the last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	/**
	 * idle_work:
	 *
	 * Used to delay clamping to idle freq on active->idle transition.
	 */
	struct msm_hrtimer_work idle_work;

	/**
	 * boost_work:
	 *
	 * Used to reset the boost_freq constraint after the boost period
	 * has elapsed.
	 */
	struct msm_hrtimer_work boost_work;
};
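
/*
 * A minimal sketch of how busy_cycles is meant to be consumed by a
 * gpu->gpu_busy() implementation (the register names here are
 * hypothetical placeholders, and real implementations also scale the
 * cycle delta into time):
 *
 *	static unsigned long axxx_gpu_busy(struct msm_gpu *gpu)
 *	{
 *		u64 cycles = gpu_read64(gpu, REG_AXXX_CNTR_LO,
 *				REG_AXXX_CNTR_HI);
 *		u64 busy = cycles - gpu->devfreq.busy_cycles;
 *
 *		gpu->devfreq.busy_cycles = cycles;
 *		return busy;
 *	}
 */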

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/**
	 * sysprof_active:
	 *
	 * The count of contexts that have enabled system profiling.
	 */
	refcount_t sysprof_active;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit rendering,
	 * and the one with current pgtables installed (for generations
	 * that support per-context pgtables).  Tracked by seqno rather
	 * than pointer value to avoid dangling pointers, and cases where
	 * a ctx can be freed and a new one created with the same address.
	 */
	int cur_ctx_seqno;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/**
	 * lock:
	 *
	 * General lock for serializing all the gpu things.
	 *
	 * TODO move to per-ring locking where feasible (ie. submit/retire
	 * path, etc)
	 */
	struct mutex lock;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock.
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/**
	 * global_faults: number of GPU hangs not attributed to a particular
	 * address space
	 */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/** retire_event: notified when submits are retired: */
	wait_queue_head_t retire_event;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* Enable clamping to idle freq when inactive: */
	bool clamp_to_idle;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};
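
/*
 * A minimal sketch of the active_submits bookkeeping described above
 * (the real accounting lives in msm_gpu.c; this only illustrates the
 * active_lock rule):
 *
 *	mutex_lock(&gpu->active_lock);
 *	if (gpu->active_submits++ == 0)
 *		msm_devfreq_active(gpu);
 *	mutex_unlock(&gpu->active_lock);
 */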

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
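
/*
 * Working through the arithmetic above: BUFSZ = ilog2(SZ_32K / 8) =
 * ilog2(4096) = 12, and BLKSZ = ilog2(32 / 8) = ilog2(4) = 2.
 */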

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->seqno, ring->memptrs->fence))
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the
 * child class that actually enables the perf counter, but the msm_gpu
 * base class will handle sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
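
/*
 * For illustration only (register and select-value names are
 * hypothetical): a child class describes its counters with a table
 * such as
 *
 *	static const struct msm_gpu_perfcntr perfcntrs[] = {
 *		{ REG_AXXX_CP_PERFCTR_SEL, REG_AXXX_CP_PERFCTR_LO,
 *		  SEL_ALWAYS_COUNT, "CP" },
 *	};
 *
 * and points gpu->perfcntrs / gpu->num_perfcntrs at it; the base class
 * then programs select_reg with select_val and samples sample_reg.
 */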

/*
 * The number of priority levels provided by drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)

/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock:    synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid:      counter incremented each time a submitqueue is created,
 *                used to assign &msm_gpu_submitqueue.id
 * @aspace:       the per-process GPU address-space
 * @ref:          reference count
 * @seqno:        unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * sysprof:
	 *
	 * The value of MSM_PARAM_SYSPROF set by userspace.  This is
	 * intended to be used by system profiling tools like Mesa's
	 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
	 *
	 * Setting a value of 1 will preserve performance counters across
	 * context switches.  Setting a value of 2 will in addition
	 * suppress suspend.  (Performance counters lose state across
	 * power collapse, which is undesirable for profiling in some
	 * cases.)
	 *
	 * The value automatically reverts to zero when the drm device
	 * file is closed.
	 */
	int sysprof;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file.  Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-
	 * level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};
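
/*
 * A sketch of the presumed indexing into entities[] (the actual lookup
 * lives in msm_submitqueue.c): with ring_nr and sched_prio produced by
 * msm_gpu_convert_priority() below,
 *
 *	entity = ctx->entities[ring_nr * NR_SCHED_PRIORITIES + sched_prio];
 */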

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu:        the gpu instance
 * @prio:       the userspace priority level
 * @ring_nr:    [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance.  However, our
 * UABI is that lower numerical values are higher priority.  So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care.  The userspace provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}
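
/*
 * Worked example, assuming NR_SCHED_PRIORITIES == 3 (MIN/NORMAL/HIGH):
 * on a gpu with nr_rings == 4, userspace priority 0 (highest) maps to
 * ring_nr 0 / sched_prio 2, priority 4 maps to ring_nr 1 / sched_prio 1,
 * and priority 12 or above is rejected with -EINVAL.
 */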

/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id:        userspace id for the submitqueue, unique within the drm_file
 * @flags:     userspace flags for the submitqueue, specified at creation
 *             (currently unused)
 * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
 *             by the submitqueue's priority
 * @faults:    the number of GPU hangs associated with this submitqueue
 * @last_fence: the sequence number of the last allocated fence (for error
 *             checking)
 * @ctx:       the per-drm_file context associated with the submitqueue (ie.
 *             which set of pgtables the jobs submitted via this
 *             submitqueue use)
 * @node:      node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 *             seqno, protected by submitqueue lock
 * @lock:      submitqueue lock
 * @ref:       reference count
 * @entity:    the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	uint32_t last_fence;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes on, the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really, is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is latched
	 * when the lo is read, so make sure to read the lo first to trigger
	 * that.
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}
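
/*
 * Hypothetical usage (the register names are placeholders): a latched
 * 64-bit counter is read lo-then-hi via the helper above, and written
 * back the same way:
 *
 *	u64 ts = gpu_read64(gpu, REG_AXXX_TS_LO, REG_AXXX_TS_HI);
 *	gpu_write64(gpu, REG_AXXX_TS_LO, REG_AXXX_TS_HI, 0);
 */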

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof);
void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);
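
/*
 * For illustration (the axxx_* names are hypothetical; the ioname is
 * the one the adreno backend uses): a probe path typically fills out a
 * config and hands everything to msm_gpu_init():
 *
 *	struct msm_gpu_config config = {
 *		.ioname = "kgsl_3d0_reg_memory",
 *		.nr_rings = 1,
 *	};
 *
 *	ret = msm_gpu_init(drm, pdev, &axxx_gpu->base, &funcs,
 *			"axxx", &config);
 */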

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->lock);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->lock);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
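
/*
 * Hypothetical usage: allocation sites pass their BO flags through
 * check_apriv() so MSM_BO_MAP_PRIV is requested only on hardware that
 * supports it, e.g.:
 *
 *	bo = msm_gem_new(dev, size, check_apriv(gpu, MSM_BO_WC));
 */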


#endif /* __MSM_GPU_H__ */