1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  *          Christian König
28  */
29 #include <linux/seq_file.h>
30 #include <linux/slab.h>
31 
32 #include <drm/amdgpu_drm.h>
33 #include <drm/drm_debugfs.h>
34 
35 #include "amdgpu.h"
36 #include "atom.h"
37 #include "amdgpu_trace.h"
38 
39 #define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
40 #define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)
41 
42 /*
43  * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
50  */
51 static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
52 
53 /**
54  * amdgpu_ib_get - request an IB (Indirect Buffer)
55  *
 * @adev: amdgpu_device pointer
 * @vm: optional VM the IB belongs to; if NULL, the suballocator GPU
 *	address is used directly
 * @size: requested IB size in bytes
 * @ib: IB object returned
59  *
60  * Request an IB (all asics).  IBs are allocated using the
61  * suballocator.
62  * Returns 0 on success, error on failure.
63  */
64 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
65 		  unsigned size, struct amdgpu_ib *ib)
66 {
67 	int r;
68 
69 	if (size) {
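		/* carve the IB out of the shared suballocator pool,
		 * aligned to 256 bytes
		 */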
70 		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
71 				      &ib->sa_bo, size, 256);
72 		if (r) {
73 			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
74 			return r;
75 		}
76 
77 		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
78 
79 		if (!vm)
80 			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
81 	}
82 
83 	return 0;
84 }
85 
86 /**
87  * amdgpu_ib_free - free an IB (Indirect Buffer)
88  *
89  * @adev: amdgpu_device pointer
90  * @ib: IB object to free
 * @f: fence the suballocation must wait on before the IB memory is reused
92  *
93  * Free an IB (all asics).
94  */
95 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
96 		    struct dma_fence *f)
97 {
98 	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
99 }
100 
101 /**
102  * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
103  *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for a direct submission
 * @f: fence created during this submission
108  *
109  * Schedule an IB on the associated ring (all asics).
110  * Returns 0 on success, error on failure.
111  *
112  * On SI, there are two parallel engines fed from the primary ring,
113  * the CE (Constant Engine) and the DE (Drawing Engine).  Since
114  * resource descriptors have moved to memory, the CE allows you to
115  * prime the caches while the DE is updating register state so that
116  * the resource descriptors will be already in cache when the draw is
117  * processed.  To accomplish this, the userspace driver submits two
118  * IBs, one for the CE and one for the DE.  If there is a CE IB (called
119  * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
120  * to SI there was just a DE IB.
121  */
122 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
123 		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
124 		       struct dma_fence **f)
125 {
126 	struct amdgpu_device *adev = ring->adev;
127 	struct amdgpu_ib *ib = &ibs[0];
128 	struct dma_fence *tmp = NULL;
129 	bool skip_preamble, need_ctx_switch;
130 	unsigned patch_offset = ~0;
131 	struct amdgpu_vm *vm;
132 	uint64_t fence_ctx;
133 	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;
	unsigned i;
137 	int r = 0;
138 	bool need_pipe_sync = false;
139 
140 	if (num_ibs == 0)
141 		return -EINVAL;
142 
143 	/* ring tests don't use a job */
144 	if (job) {
145 		vm = job->vm;
146 		fence_ctx = job->base.s_fence->scheduled.context;
147 	} else {
148 		vm = NULL;
149 		fence_ctx = 0;
150 	}
151 
152 	if (!ring->sched.ready) {
153 		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
154 		return -EINVAL;
155 	}
156 
157 	if (vm && !job->vmid) {
158 		dev_err(adev->dev, "VM IB without ID\n");
159 		return -EINVAL;
160 	}
161 
162 	alloc_size = ring->funcs->emit_frame_size + num_ibs *
163 		ring->funcs->emit_ib_size;
164 
165 	r = amdgpu_ring_alloc(ring, alloc_size);
166 	if (r) {
167 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
168 		return r;
169 	}
170 
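	/* a context switch is needed when the previous submission on
	 * this ring came from a different scheduler fence context
	 */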
171 	need_ctx_switch = ring->current_ctx != fence_ctx;
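	/* a pipeline sync is needed when the scheduler had to wait for
	 * an unsignaled dependency, when an SR-IOV VF switches context,
	 * or when the VM management requires one
	 */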
172 	if (ring->funcs->emit_pipeline_sync && job &&
173 	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
174 	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
175 	     amdgpu_vm_need_pipeline_sync(ring, job))) {
176 		need_pipe_sync = true;
177 
178 		if (tmp)
179 			trace_amdgpu_ib_pipe_sync(job, tmp);
180 
181 		dma_fence_put(tmp);
182 	}
183 
184 	if (ring->funcs->insert_start)
185 		ring->funcs->insert_start(ring);
186 
187 	if (job) {
188 		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
189 		if (r) {
190 			amdgpu_ring_undo(ring);
191 			return r;
192 		}
193 	}
194 
195 	if (job && ring->funcs->init_cond_exec)
196 		patch_offset = amdgpu_ring_init_cond_exec(ring);
197 
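	/* flush the HDP (host data path) write queue so CPU writes to
	 * GPU memory are visible to the engines; APUs use coherent
	 * system memory and can skip this
	 */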
198 #ifdef CONFIG_X86_64
199 	if (!(adev->flags & AMD_IS_APU))
200 #endif
201 	{
202 		if (ring->funcs->emit_hdp_flush)
203 			amdgpu_ring_emit_hdp_flush(ring);
204 		else
205 			amdgpu_asic_flush_hdp(adev, ring);
206 	}
207 
208 	if (need_ctx_switch)
209 		status |= AMDGPU_HAVE_CTX_SWITCH;
210 
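	/* without a context switch the preamble IBs can be dropped; the
	 * context control packet tells the CP about the preamble and
	 * preemption state
	 */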
211 	skip_preamble = ring->current_ctx == fence_ctx;
212 	if (job && ring->funcs->emit_cntxcntl) {
213 		status |= job->preamble_status;
214 		status |= job->preemption_status;
215 		amdgpu_ring_emit_cntxcntl(ring, status);
216 	}
217 
218 	for (i = 0; i < num_ibs; ++i) {
219 		ib = &ibs[i];
220 
221 		/* drop preamble IBs if we don't have a context switch */
222 		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
223 		    skip_preamble &&
224 		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
225 		    !amdgpu_mcbp &&
		    !amdgpu_sriov_vf(adev)) /* for SR-IOV preemption, the preamble CE IB must always be inserted */
227 			continue;
228 
229 		amdgpu_ring_emit_ib(ring, job, ib, status);
230 		status &= ~AMDGPU_HAVE_CTX_SWITCH;
231 	}
232 
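	/* mark the end of the frame for trusted memory zone (TMZ)
	 * handling, if the ring supports it
	 */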
233 	if (ring->funcs->emit_tmz)
234 		amdgpu_ring_emit_tmz(ring, false);
235 
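	/* invalidate the HDP read cache so reads through the host data
	 * path return fresh data; like the flush above, APUs can skip
	 * this
	 */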
236 #ifdef CONFIG_X86_64
237 	if (!(adev->flags & AMD_IS_APU))
238 #endif
239 		amdgpu_asic_invalidate_hdp(adev, ring);
240 
241 	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
242 		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
243 
244 	/* wrap the last IB with fence */
245 	if (job && job->uf_addr) {
246 		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
247 				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
248 	}
249 
250 	r = amdgpu_fence_emit(ring, f, fence_flags);
251 	if (r) {
252 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
253 		if (job && job->vmid)
254 			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
255 		amdgpu_ring_undo(ring);
256 		return r;
257 	}
258 
259 	if (ring->funcs->insert_end)
260 		ring->funcs->insert_end(ring);
261 
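	/* the frame size is known now, so patch the conditional
	 * execution packet emitted by init_cond_exec above (used for
	 * mid-command-buffer preemption)
	 */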
262 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
263 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
264 
265 	ring->current_ctx = fence_ctx;
266 	if (vm && ring->funcs->emit_switch_buffer)
267 		amdgpu_ring_emit_switch_buffer(ring);
268 	amdgpu_ring_commit(ring);
269 	return 0;
270 }
271 
272 /**
273  * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
274  *
275  * @adev: amdgpu_device pointer
276  *
277  * Initialize the suballocator to manage a pool of memory
278  * for use as IBs (all asics).
279  * Returns 0 on success, error on failure.
280  */
281 int amdgpu_ib_pool_init(struct amdgpu_device *adev)
282 {
283 	int r;
284 
	if (adev->ib_pool_ready)
		return 0;

288 	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE * 64 * 1024,
290 				      AMDGPU_GPU_PAGE_SIZE,
291 				      AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;
295 
296 	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
300 	return 0;
301 }
302 
303 /**
304  * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
305  *
306  * @adev: amdgpu_device pointer
307  *
308  * Tear down the suballocator managing the pool of memory
309  * for use as IBs (all asics).
310  */
311 void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
312 {
313 	if (adev->ib_pool_ready) {
314 		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
315 		adev->ib_pool_ready = false;
316 	}
317 }
318 
319 /**
320  * amdgpu_ib_ring_tests - test IBs on the rings
321  *
322  * @adev: amdgpu_device pointer
323  *
324  * Test an IB (Indirect Buffer) on each ring.
325  * If the test fails, disable the ring.
326  * Returns 0 on success, error if the primary GFX ring
327  * IB test fails.
328  */
329 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
330 {
331 	unsigned i;
332 	int r, ret = 0;
333 	long tmo_gfx, tmo_mm;
334 
335 	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
336 	if (amdgpu_sriov_vf(adev)) {
		/* MM engines on the hypervisor side are not scheduled
		 * together with the CP and SDMA engines, so even in
		 * exclusive mode an MM engine could still be running on
		 * another VF.  The IB test timeout for MM engines under
		 * SR-IOV therefore has to be long; 8 seconds should be
		 * enough for the MM engine to come back to this VF.
		 */
343 		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
344 	}
345 
346 	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout has to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only.
		 */
351 		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
352 	} else if (adev->gmc.xgmi.hive_id) {
353 		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
354 	}
355 
356 	for (i = 0; i < adev->num_rings; ++i) {
357 		struct amdgpu_ring *ring = adev->rings[i];
358 		long tmo;
359 
360 		/* KIQ rings don't have an IB test because we never submit IBs
361 		 * to them and they have no interrupt support.
362 		 */
363 		if (!ring->sched.ready || !ring->funcs->test_ib)
364 			continue;
365 
		/* MM engines need more time */
367 		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
368 			ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
369 			ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
370 			ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
371 			ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
372 			ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
373 			tmo = tmo_mm;
374 		else
375 			tmo = tmo_gfx;
376 
377 		r = amdgpu_ring_test_ib(ring, tmo);
378 		if (!r) {
379 			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
380 				      ring->name);
381 			continue;
382 		}
383 
384 		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);
387 
		if (ring == &adev->gfx.gfx_ring[0]) {
			/* uh oh, that's really bad: without the primary
			 * GFX ring, acceleration as a whole is dead
			 */
			adev->accel_working = false;
			return r;
		}

		ret = r;
396 	}
397 	return ret;
398 }
399 
400 /*
401  * Debugfs info
402  */
403 #if defined(CONFIG_DEBUG_FS)
404 
405 static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
406 {
407 	struct drm_info_node *node = (struct drm_info_node *) m->private;
408 	struct drm_device *dev = node->minor->dev;
409 	struct amdgpu_device *adev = dev->dev_private;
410 
411 	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
412 
	return 0;
}
416 
417 static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
418 	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
419 };
420 
421 #endif
422 
423 static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
424 {
425 #if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list,
					ARRAY_SIZE(amdgpu_debugfs_sa_list));
427 #else
428 	return 0;
429 #endif
430 }
431