/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
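
/*
 * A minimal usage sketch (a hypothetical caller, with error handling
 * trimmed) built from the helpers in this file:
 *
 *	r = amdgpu_ring_alloc(ring, 16);	reserve 16 dwords
 *	if (r)
 *		return r;
 *	amdgpu_ring_insert_nop(ring, 16);	emit the commands (NOPs here)
 *	amdgpu_ring_commit(ring);		pad to fetch size, bump wptr
 *
 * If command construction fails midway, amdgpu_ring_undo() restores the
 * saved wptr instead of committing.
 */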

/**
 * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
 *
 * @type: ring type for which to return the limit.
 */
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
{
	switch (type) {
	case AMDGPU_RING_TYPE_GFX:
		/* Need to keep at least 192 on GFX7+ for old radv. */
		return 192;
	case AMDGPU_RING_TYPE_COMPUTE:
		return 125;
	case AMDGPU_RING_TYPE_VCN_JPEG:
		return 16;
	default:
		return 49;
	}
}

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit() can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
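
/*
 * Worked example for the padding loop above (illustrative numbers): with
 * align_mask = 7 (an 8-dword boundary) and ib->length_dw = 13, three NOP
 * dwords are appended, leaving length_dw = 16.
 */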

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}
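
/*
 * Worked example for the padding math in amdgpu_ring_commit() above
 * (illustrative numbers): with align_mask = 7 and wptr = 21,
 * count = (8 - (21 & 7)) % 8 = 3, so three NOPs move wptr to 24; the
 * modulo makes count collapse to 0 when wptr is already aligned.
 */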

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) :			\
	 (ring->adev->wb.gpu_addr + offset * 4))

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 (&ring->adev->wb.wb[offset]))
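
/*
 * Note the unit difference between the two branches above: writeback
 * slots are dword-indexed (hence "offset * 4" for the GPU address),
 * while MES context metadata offsets are already byte offsets.
 */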

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;
	unsigned int max_ibs_dw;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		if (!ring->is_mes_queue) {
			ring->idx = adev->num_rings++;
			adev->rings[ring->idx] = ring;
		}

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	if (ring->is_mes_queue) {
		ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
							  AMDGPU_MES_CTX_RPTR_OFFS);
		ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
							  AMDGPU_MES_CTX_WPTR_OFFS);
		ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
							   AMDGPU_MES_CTX_FENCE_OFFS);
		ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
								 AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
		ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
							      AMDGPU_MES_CTX_COND_EXE_OFFS);
	} else {
		r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
			return r;
		}
	}

	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	max_ibs_dw = ring->funcs->emit_frame_size +
		     amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
	max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	if (WARN_ON(max_ibs_dw > max_dw))
		max_dw = max_ibs_dw;

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
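
	/*
	 * Worked example (illustrative numbers): with max_dw = 1024 and
	 * sched_hw_submission = 2, ring_size becomes
	 * roundup_pow_of_two(1024 * 4 * 2) = 8192 bytes, so buf_mask is
	 * 8192 / 4 - 1 = 0x7ff dwords.
	 */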

	/* Allocate ring buffer */
	if (ring->is_mes_queue) {
		int offset = 0;

		BUG_ON(ring->ring_size > PAGE_SIZE * 4);

		offset = amdgpu_mes_ctx_get_offs(ring,
						 AMDGPU_MES_CTX_RING_OFFS);
		ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		amdgpu_ring_clear_ring(ring);

	} else if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{

	/* Don't tear down a ring that was never initialized */
	if (!(ring->adev) ||
	    (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
		return;

	ring->sched.ready = false;

	if (!ring->is_mes_queue) {
		amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
		amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

		amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
		amdgpu_device_wb_free(ring->adev, ring->fence_offs);

		amdgpu_bo_free_kernel(&ring->ring_obj,
				      &ring->gpu_addr,
				      (void **)&ring->ring);
	} else {
		kfree(ring->fence_drv.fences);
	}

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	if (!ring->is_mes_queue)
		ring->adev->rings[ring->idx] = NULL;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timed out fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	unsigned long flags;

	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	spin_lock_irqsave(fence->lock, flags);
	if (!dma_fence_is_signaled_locked(fence))
		dma_fence_set_error(fence, -ENODATA);
	spin_unlock_irqrestore(fence->lock, flags);

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	uint32_t value, result, early[3];
	loff_t i;
	int r;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};
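
/*
 * Example: with hypothetical names (DRM minor 0, a ring debugfs file
 * created as "amdgpu_ring_gfx" below), the 12-byte header can be dumped
 * with something like:
 *
 *	xxd -l 12 /sys/kernel/debug/dri/0/amdgpu_ring_gfx
 */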

static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	volatile u32 *mqd;
	u32 *kbuf;
	int r, i;
	uint32_t value, result;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		goto err_free;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
	if (r)
		goto err_unreserve;

	/*
	 * Copy to local buffer to avoid put_user(), which might fault
	 * and acquire mmap_sem, under reservation_ww_class_mutex.
	 */
	for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
		kbuf[i] = mqd[i];

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);

	result = 0;
	while (size) {
		if (*pos >= ring->mqd_size)
			break;

		value = kbuf[*pos/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			goto err_free;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	kfree(kbuf);
	return result;

err_unreserve:
	amdgpu_bo_unreserve(ring->mqd_obj);
err_free:
	kfree(kbuf);
	return r;
}

static const struct file_operations amdgpu_debugfs_mqd_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_mqd_read,
	.llseek = default_llseek
};

static int amdgpu_debugfs_ring_error(void *data, u64 val)
{
	struct amdgpu_ring *ring = data;

	amdgpu_fence_driver_set_error(ring, val);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
				amdgpu_debugfs_ring_error, "%lld\n");
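
/*
 * Usage sketch (hypothetical names again: DRM minor 0, ring "gfx"): the
 * write-only error file created below accepts a signed errno value and
 * feeds it to amdgpu_fence_driver_set_error(), e.g.
 *
 *	echo -5 > /sys/kernel/debug/dri/0/amdgpu_error_gfx
 */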

#endif

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
				 &amdgpu_debugfs_ring_fops,
				 ring->ring_size + 12);

	if (ring->mqd_obj) {
		sprintf(name, "amdgpu_mqd_%s", ring->name);
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_mqd_fops,
					 ring->mqd_size);
	}

	sprintf(name, "amdgpu_error_%s", ring->name);
	debugfs_create_file(name, 0200, root, ring,
			    &amdgpu_debugfs_error_fops);

#endif
}

/**
 * amdgpu_ring_test_helper - test the ring and set sched readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler's readiness status accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;
	return r;
}

static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;

	/* The map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field.
	 */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
	     amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
	     amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}
}

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);

	ring->wptr = 0;

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}

void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_begin(ring);
}

void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_end(ring);
}

void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
}

void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
}

void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}