/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_RINGBUFFER_H__
#define __MSM_RINGBUFFER_H__

#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

#define rbmemptr(ring, member)  \
	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

#define rbmemptr_stats(ring, index, member) \
	(rbmemptr((ring), stats) + \
	 ((index) * sizeof(struct msm_gpu_submit_stats)) + \
	 offsetof(struct msm_gpu_submit_stats, member))
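
/*
 * A usage sketch (illustrative only): both macros yield GPU iovas that the
 * CP can be told to write to, typically with a per-submit slot index (e.g.
 * seqno % MSM_GPU_SUBMIT_STATS_COUNT for the stats array):
 *
 *	uint64_t fence_iova = rbmemptr(ring, fence);
 *	uint64_t stat_iova = rbmemptr_stats(ring, index, cpcycles_start);
 */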

struct msm_gpu_submit_stats {
	u64 cpcycles_start;
	u64 cpcycles_end;
	u64 alwayson_start;
	u64 alwayson_end;
};

#define MSM_GPU_SUBMIT_STATS_COUNT 64

struct msm_rbmemptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;

	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
	volatile u64 ttbr0;
};
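
/*
 * These fields live in memory shared with the GPU (hence volatile); the
 * GPU/CP updates them and the CPU polls them, e.g. (illustrative) reading
 * the last completed fence in retire/hangcheck paths:
 *
 *	uint32_t fence = ring->memptrs->fence;
 */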

struct msm_cp_state {
	uint64_t ib1_base, ib2_base;
	uint32_t ib1_rem, ib2_rem;
};

struct msm_ringbuffer {
	struct msm_gpu *gpu;
	int id;
	struct drm_gem_object *bo;
	uint32_t *start, *end, *cur, *next;

	/*
	 * The job scheduler for this ring.
	 */
	struct drm_gpu_scheduler sched;

	/*
	 * List of in-flight submits on this ring.  Protected by submit_lock.
	 *
	 * Currently just submits that are already written into the ring, not
	 * submits that are still in drm_gpu_scheduler's queues.  At a later
	 * step we could probably move to letting drm_gpu_scheduler manage
	 * hangcheck detection and keep track of submit jobs that are in-
	 * flight.
	 */
	struct list_head submits;
	spinlock_t submit_lock;
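
	/*
	 * A minimal sketch (illustrative) of walking the in-flight list,
	 * assuming the submit's list node member is named 'node':
	 *
	 *	spin_lock(&ring->submit_lock);
	 *	list_for_each_entry(submit, &ring->submits, node)
	 *		...;
	 *	spin_unlock(&ring->submit_lock);
	 */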

	uint64_t iova;
	uint32_t hangcheck_fence;
	struct msm_rbmemptrs *memptrs;
	uint64_t memptrs_iova;
	struct msm_fence_context *fctx;

	/**
	 * hangcheck_progress_retries:
	 *
	 * The number of extra hangcheck duration cycles we have granted
	 * because the GPU appeared to be making forward progress.
	 *
	 * For GPU generations which support progress detection (see
	 * msm_gpu_funcs::progress()), if the GPU appears to be making progress
	 * (ie. the CP has advanced in the command stream), we'll allow up to
	 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES expirations of the hangcheck timer
	 * before killing the job.  But to detect progress we need two sample
	 * points, so the duration of the hangcheck timer is halved.  In other
	 * words we'll let the submit run for up to:
	 *
	 * (DRM_MSM_HANGCHECK_DEFAULT_PERIOD / 2) * (DRM_MSM_HANGCHECK_PROGRESS_RETRIES + 1)
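	 *
	 * For example, assuming a 500ms default period and 3 progress
	 * retries, that works out to 250ms * (3 + 1) = 1s of runtime
	 * before the job is killed.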
	 */
	int hangcheck_progress_retries;

	/**
	 * last_cp_state: The state of the CP at the last call to gpu->progress()
	 */
	struct msm_cp_state last_cp_state;
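
	/*
	 * A sketch (assumed shape, not the literal driver code) of how a
	 * progress() hook might use this as a snapshot-and-compare:
	 *
	 *	struct msm_cp_state cp_state = read_cp_state(gpu); // hypothetical helper
	 *	bool progress = !!memcmp(&cp_state, &ring->last_cp_state,
	 *			sizeof(cp_state));
	 *	ring->last_cp_state = cp_state;
	 */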

	/*
	 * preempt_lock protects preemption and serializes wptr updates against
	 * preemption.  Can be acquired from irq context.
	 */
	spinlock_t preempt_lock;
};

struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
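
/*
 * A minimal allocation sketch (illustrative), assuming the caller carves
 * per-ring slots out of one shared memptrs buffer:
 *
 *	for (i = 0; i < nr_rings; i++) {
 *		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
 *		if (IS_ERR(gpu->rb[i]))
 *			goto fail;
 *		memptrs += sizeof(struct msm_rbmemptrs);
 *		memptrs_iova += sizeof(struct msm_rbmemptrs);
 *	}
 */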

/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */

static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
	/*
	 * ring->next points to the current command being written - it won't be
	 * committed as ring->cur until the flush
	 */
	if (ring->next == ring->end)
		ring->next = ring->start;
	*(ring->next++) = data;
}
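
/*
 * A usage sketch (illustrative): staged dwords only become visible to the
 * GPU once the adapter's flush hook publishes ring->next as ring->cur and
 * updates the hardware wptr, e.g.:
 *
 *	OUT_RING(ring, opcode);
 *	OUT_RING(ring, payload);
 *	gpu->funcs->flush(gpu, ring);
 */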

#endif /* __MSM_RINGBUFFER_H__ */