/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * A scheduler entity is a wrapper around a job queue or a group of other
 * entities. Entities take turns emitting jobs from their job queues to the
 * corresponding hardware ring, based on the scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;		/* node on the rq's entity list */
	struct drm_sched_rq		*rq;		/* runqueue this entity is queued on */
	spinlock_t			rq_lock;	/* protects @rq */
	struct drm_gpu_scheduler	*sched;

	spinlock_t			queue_lock;
	struct spsc_queue		job_queue;	/* queue of jobs to schedule */

	atomic_t			fence_seq;	/* seqno for scheduled fences */
	uint64_t			fence_context;

	struct dma_fence		*dependency;	/* fence the next job waits on */
	struct dma_fence_cb		cb;		/* callback for @dependency */
	atomic_t			*guilty; /* points to ctx's guilty */
};

/**
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct list_head		entities;	/* entities on this runqueue */
	struct drm_sched_entity		*current_entity; /* last pick, for round robin */
};

struct drm_sched_fence {
	struct dma_fence		scheduled;	/* signals when the job is scheduled */
	struct dma_fence		finished;	/* signals when the job has completed */
	struct dma_fence_cb		cb;		/* callback on @parent */
	struct dma_fence		*parent;	/* hardware fence returned by run_job */
	struct drm_gpu_scheduler	*sched;
	spinlock_t			lock;
	void				*owner;		/* job owner, for debugging */
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

struct drm_sched_job {
	struct spsc_node		queue_node;	/* node in the entity's job_queue */
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;	/* scheduled/finished fence pair */
	struct dma_fence_cb		finish_cb;	/* callback queueing @finish_work */
	struct work_struct		finish_work;	/* cleanup once the job finishes */
	struct list_head		node;		/* node on the ring mirror list */
	struct delayed_work		work_tdr;	/* timeout detection work */
	uint64_t			id;		/* unique id on this scheduler */
	atomic_t			karma;		/* bumped on hang, see below */
	enum drm_sched_priority		s_priority;
};

/* Bump the job's karma and report whether it has exceeded @threshold,
 * i.e. whether the job should be treated as guilty of a hang. */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * Define the backend operations called by the scheduler; these
 * functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/* Return the next fence @sched_job must wait on, or NULL if it is
	 * ready to run. */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);
	/* Submit the job to the hardware; return the hardware fence. */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
	/* Called when a job exceeds the scheduler's timeout. */
	void (*timedout_job)(struct drm_sched_job *sched_job);
	/* Release the job's resources after it has finished. */
	void (*free_job)(struct drm_sched_job *sched_job);
};

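/*
 * A minimal driver-side sketch of the backend ops. The foo_* names are
 * hypothetical; a real driver wires these up to its own job and ring
 * code (amdgpu is an in-tree user):
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = container_of(sched_job, struct foo_job,
 *						   base);
 *
 *		return foo_ring_submit(job);
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.dependency	= foo_job_dependency,
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_job_timedout,
 *		.free_job	= foo_job_free,
 *	};
 */
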
/**
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;	/* max jobs in flight on hw */
	long				timeout;	/* job timeout, in jiffies */
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX]; /* one rq per priority */
	wait_queue_head_t		wake_up_worker;	/* scheduler waits here for work */
	wait_queue_head_t		job_scheduled;	/* woken on schedule, used to flush entities */
	atomic_t			hw_rq_count;	/* jobs currently in flight */
	atomic64_t			job_id_count;
	struct task_struct		*thread;	/* scheduler kthread */
	struct list_head		ring_mirror_list; /* in-flight jobs, for recovery */
	spinlock_t			job_list_lock;	/* protects @ring_mirror_list */
	int				hang_limit;	/* karma threshold for guilty jobs */
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);

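/*
 * Bring-up sketch, one scheduler instance per hardware ring; the names
 * and values here are hypothetical:
 *
 *	r = drm_sched_init(&ring->sched, &foo_sched_ops,
 *			   num_hw_submission, foo_job_hang_limit,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_fini(&ring->sched);
 */
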
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
			     struct drm_sched_rq *rq);

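/*
 * Entity lifetime sketch, typically one entity per userspace context per
 * ring (ctx and queue_depth are hypothetical):
 *
 *	struct drm_sched_rq *rq =
 *		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *	r = drm_sched_entity_init(&ring->sched, &ctx->entity, rq,
 *				  queue_depth, &ctx->guilty);
 *	...
 *	drm_sched_entity_fini(&ring->sched, &ctx->entity);
 */
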
struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_gpu_scheduler *sched,
		       struct drm_sched_entity *entity,
		       void *owner);
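
/*
 * Submission sketch: initialize the job against an entity, take a
 * reference to its finished fence, then push it; the scheduler thread
 * calls ops->run_job() once all dependencies have signaled. A foo_job
 * with an embedded drm_sched_job named "base" is hypothetical:
 *
 *	r = drm_sched_job_init(&job->base, &ring->sched, &ctx->entity,
 *			       owner);
 *	if (r)
 *		return r;
 *
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 */
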
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
			    struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

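/*
 * Hang-recovery sketch, following the amdgpu flow: park the scheduler
 * thread, detach the hardware fences from the in-flight jobs, reset the
 * hardware, then resubmit the mirrored jobs:
 *
 *	kthread_park(sched->thread);
 *	drm_sched_hw_job_reset(sched, bad_job);
 *	... reset the hardware ...
 *	drm_sched_job_recovery(sched);
 *	kthread_unpark(sched->thread);
 */
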
#endif