/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

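/**
 * amdgpu_job_timedout - scheduler callback for a job that ran too long
 * @s_job: the scheduler job that triggered the timeout
 *
 * First tries a soft recovery of the ring. If that fails, the last
 * signaled and emitted sequence numbers and the owning process are
 * dumped, and either a full GPU recovery is started or the scheduler
 * timeout is suspended (setting the TDR debug flag on SR-IOV VFs).
 */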
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		return;
	}

	amdgpu_vm_get_task_info(adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(adev)) {
		amdgpu_device_gpu_recover(adev, job);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}
}

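/**
 * amdgpu_job_alloc - allocate a job together with its IB array
 * @adev: amdgpu device the job runs on
 * @num_ibs: number of IBs the job needs, must be non-zero
 * @job: resulting job, only valid on success
 * @vm: optional VM the job executes in
 *
 * Allocates the job and the trailing array of @num_ibs IBs in a single
 * allocation and initializes the sync objects.
 *
 * Returns 0 on success or a negative error code.
 */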
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

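/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device the job runs on
 * @size: size of the IB in bytes
 * @pool_type: IB pool to allocate the IB from
 * @job: resulting job, only valid on success
 *
 * Convenience wrapper around amdgpu_job_alloc() for kernel-internal
 * submissions that don't belong to a VM.
 *
 * Returns 0 on success or a negative error code.
 */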
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
		enum amdgpu_ib_pool_type pool_type,
		struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

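/**
 * amdgpu_job_free_resources - release the IBs of a job
 * @job: job to clean up
 *
 * Frees all IBs of the job, keyed on the scheduler's finished fence when
 * available so the IB memory isn't reused before the job has run.
 */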
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

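/**
 * amdgpu_job_free_cb - scheduler callback to free a completed job
 * @s_job: the scheduler job to free
 */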
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

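/**
 * amdgpu_job_free - free a job that was never pushed to the scheduler
 * @job: job to free
 */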
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

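/**
 * amdgpu_job_submit - push a job to a scheduler entity
 * @job: job to submit
 * @entity: scheduler entity to run the job on
 * @owner: job owner, used for dependency handling
 * @f: resulting finished fence, must not be NULL
 *
 * After this call the job belongs to the scheduler and must not be
 * touched by the caller anymore.
 *
 * Returns 0 on success or a negative error code.
 */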
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base, entity);

	return 0;
}

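/**
 * amdgpu_job_submit_direct - submit a job directly to a ring
 * @job: job to submit
 * @ring: ring to execute the job's IBs on
 * @fence: resulting hardware fence
 *
 * Bypasses the scheduler and writes the IBs straight to the ring, for
 * kernel-internal submissions that can't go through an entity.
 *
 * Returns 0 on success or a negative error code.
 */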
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	job->fence = dma_fence_get(*fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

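/**
 * amdgpu_job_dependency - scheduler callback returning the next dependency
 * @sched_job: job to get the next dependency for
 * @s_entity: entity the job is queued on
 *
 * Returns the next fence the job has to wait for before it can run,
 * grabbing a VMID once all explicit dependencies are satisfied.
 */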
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	bool explicit = false;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
	if (fence && explicit) {
		if (drm_sched_dependency_optimized(fence, s_entity)) {
			r = amdgpu_sync_fence(&job->sched_sync, fence, false);
			if (r)
				DRM_ERROR("Error adding fence (%d)\n", r);
		}
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync, NULL);
	}

	return fence;
}

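/**
 * amdgpu_job_run - scheduler callback that executes a job on its ring
 * @sched_job: job to run
 *
 * Writes the job's IBs to the ring unless an error is already set on the
 * finished fence, e.g. because VRAM was lost since the job was created.
 *
 * Returns the hardware fence or an ERR_PTR on failure.
 */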
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	/* Skip the IBs as well if VRAM was lost since the job was created */
	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* After a GPU reset the hw fence is replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);

	return r ? ERR_PTR(r) : fence;
}

/*
 * The scheduler keeps queued jobs in per-entity spsc queues; map a popped
 * queue node back to its drm_sched_job.
 */
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

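/**
 * amdgpu_job_stop_all_jobs_on_sched - force completion of all jobs
 * @sched: scheduler to drain
 *
 * Signals the fences of every job on @sched, both those still queued on
 * entities and those already pushed to the hardware ring, with the error
 * set to -EHWPOISON. Used to cancel pending work when the GPU can't
 * execute it anymore.
 */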
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};