/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>

/* max number of rings */
#define AMDGPU_MAX_RINGS		28
#define AMDGPU_MAX_HWIP_RINGS		8
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2

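/* Ring priority levels; AMDGPU_RING_PRIO_DEFAULT aliases AMDGPU_RING_PRIO_1. */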
enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)

/* fence flag bit to indicate the fence is embedded in a job */
#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT		(DMA_FENCE_FLAG_USER_BITS + 1)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;

struct amdgpu_sched {
	u32				num_scheds;
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
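/*
 * Per-ring fence bookkeeping: gpu_addr/cpu_addr point at the memory slot
 * the fence packet writes its sequence number to, sync_seq is the last
 * emitted sequence number and last_seq tracks the last one seen signaled.
 */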
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};

void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission,
				  atomic_t *sched_score);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
		      unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
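
/*
 * Illustrative polling-fence sketch (useful before fence interrupts are
 * set up); error handling elided:
 *
 *	uint32_t seq;
 *
 *	amdgpu_fence_emit_polling(ring, &seq, timeout);
 *	amdgpu_ring_commit(ring);
 *	amdgpu_fence_wait_polling(ring, seq, timeout);
 */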

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	unsigned		vmhub;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
};

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			wptr;
	u64			wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;
	uint32_t		buf_mask;
	u32			idx;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	unsigned		fence_offs;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;
	int			hw_prio;
};

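/* Convenience wrappers that dispatch through the ring's funcs table. */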
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib((r))

int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
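
/*
 * Typical direct ring submission, as a minimal sketch (packet contents
 * and error handling elided):
 *
 *	r = amdgpu_ring_alloc(ring, ndw);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ...);
 *	amdgpu_ring_commit(ring);
 *
 * amdgpu_ring_undo() restores the previous wptr if emission fails before
 * the commit.
 */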
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int ring_size, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);

static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

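/* Write a single dword; the caller must have reserved space with amdgpu_ring_alloc(). */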
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}

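/*
 * Copy count_dw dwords from src into the ring, splitting the copy in two
 * where the write pointer wraps past the end of the ring buffer.
 */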
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	if (unlikely(ring->count_dw < count_dw))
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	occupied = ring->wptr & ring->buf_mask;
	dst = (void *)&ring->ring[occupied];
	/* chunk1 is the part that fits before the wrap point, chunk2 the rest */
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	/* convert dword counts to bytes for memcpy */
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
		memcpy(dst, src, chunk1);

	if (chunk2) {
		src += chunk1;
		dst = (void *)ring->ring;
		memcpy(dst, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);
#endif