xref: /openbmc/linux/drivers/gpu/drm/nouveau/nouveau_sched.c (revision aa298b30ce566bb7fe0d5967d3d864cf636d8e4f)
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/slab.h>
4 #include <drm/gpu_scheduler.h>
5 #include <drm/drm_syncobj.h>
6 
7 #include "nouveau_drv.h"
8 #include "nouveau_gem.h"
9 #include "nouveau_mem.h"
10 #include "nouveau_dma.h"
11 #include "nouveau_exec.h"
12 #include "nouveau_abi16.h"
13 #include "nouveau_sched.h"
14 
15 /* FIXME
16  *
17  * We want to make sure that jobs currently executing can't be deferred by
18  * other jobs competing for the hardware. Otherwise we might end up with job
19  * timeouts just because of too many clients submitting too many jobs. We don't
20  * want jobs to time out because of system load, but because of the job being
21  * too bulky.
22  *
23  * For now allow for up to 16 concurrent jobs in flight until we know how many
24  * rings the hardware can process in parallel.
25  */
26 #define NOUVEAU_SCHED_HW_SUBMISSIONS		16
27 #define NOUVEAU_SCHED_JOB_TIMEOUT_MS		10000
28 
29 int
30 nouveau_job_init(struct nouveau_job *job,
31 		 struct nouveau_job_args *args)
32 {
33 	struct nouveau_sched_entity *entity = args->sched_entity;
34 	int ret;
35 
36 	job->file_priv = args->file_priv;
37 	job->cli = nouveau_cli(args->file_priv);
38 	job->entity = entity;
39 
40 	job->sync = args->sync;
41 	job->resv_usage = args->resv_usage;
42 
43 	job->ops = args->ops;
44 
45 	job->in_sync.count = args->in_sync.count;
46 	if (job->in_sync.count) {
47 		if (job->sync)
48 			return -EINVAL;
49 
50 		job->in_sync.data = kmemdup(args->in_sync.s,
51 					 sizeof(*args->in_sync.s) *
52 					 args->in_sync.count,
53 					 GFP_KERNEL);
54 		if (!job->in_sync.data)
55 			return -ENOMEM;
56 	}
57 
58 	job->out_sync.count = args->out_sync.count;
59 	if (job->out_sync.count) {
60 		if (job->sync) {
61 			ret = -EINVAL;
62 			goto err_free_in_sync;
63 		}
64 
65 		job->out_sync.data = kmemdup(args->out_sync.s,
66 					  sizeof(*args->out_sync.s) *
67 					  args->out_sync.count,
68 					  GFP_KERNEL);
69 		if (!job->out_sync.data) {
70 			ret = -ENOMEM;
71 			goto err_free_in_sync;
72 		}
73 
74 		job->out_sync.objs = kcalloc(job->out_sync.count,
75 					     sizeof(*job->out_sync.objs),
76 					     GFP_KERNEL);
77 		if (!job->out_sync.objs) {
78 			ret = -ENOMEM;
79 			goto err_free_out_sync;
80 		}
81 
82 		job->out_sync.chains = kcalloc(job->out_sync.count,
83 					       sizeof(*job->out_sync.chains),
84 					       GFP_KERNEL);
85 		if (!job->out_sync.chains) {
86 			ret = -ENOMEM;
87 			goto err_free_objs;
88 		}
89 
90 	}
91 
92 	ret = drm_sched_job_init(&job->base, &entity->base, NULL);
93 	if (ret)
94 		goto err_free_chains;
95 
96 	job->state = NOUVEAU_JOB_INITIALIZED;
97 
98 	return 0;
99 
100 err_free_chains:
101 	kfree(job->out_sync.chains);
102 err_free_objs:
103 	kfree(job->out_sync.objs);
104 err_free_out_sync:
105 	kfree(job->out_sync.data);
106 err_free_in_sync:
107 	kfree(job->in_sync.data);
108 return ret;
109 }
110 
111 void
112 nouveau_job_free(struct nouveau_job *job)
113 {
114 	kfree(job->in_sync.data);
115 	kfree(job->out_sync.data);
116 	kfree(job->out_sync.objs);
117 	kfree(job->out_sync.chains);
118 }
119 
/* Final job teardown: drop the done_fence reference taken at submission,
 * detach the job from the DRM scheduler, then hand the remaining cleanup
 * (including freeing the job itself) to the job-type specific free() op.
 */
void nouveau_job_fini(struct nouveau_job *job)
{
	dma_fence_put(job->done_fence);
	drm_sched_job_cleanup(&job->base);
	job->ops->free(job);
}
126 
127 static int
128 sync_find_fence(struct nouveau_job *job,
129 		struct drm_nouveau_sync *sync,
130 		struct dma_fence **fence)
131 {
132 	u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
133 	u64 point = 0;
134 	int ret;
135 
136 	if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
137 	    stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
138 		return -EOPNOTSUPP;
139 
140 	if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
141 		point = sync->timeline_value;
142 
143 	ret = drm_syncobj_find_fence(job->file_priv,
144 				     sync->handle, point,
145 				     0 /* flags */, fence);
146 	if (ret)
147 		return ret;
148 
149 	return 0;
150 }
151 
/* Register every userspace in-sync as a scheduler dependency of @job.
 *
 * Each in-sync item is resolved to a fence via sync_find_fence() and
 * handed to drm_sched_job_add_dependency(), which takes over the fence
 * reference in both the success and the error case, so no explicit
 * dma_fence_put() is needed here.
 *
 * Returns 0 on success or a negative error code; on failure the
 * dependencies added so far remain on the job and are released by
 * drm_sched_job_cleanup() during job teardown.
 */
static int
nouveau_job_add_deps(struct nouveau_job *job)
{
	struct dma_fence *in_fence = NULL;
	int ret, i;

	for (i = 0; i < job->in_sync.count; i++) {
		struct drm_nouveau_sync *sync = &job->in_sync.data[i];

		ret = sync_find_fence(job, sync, &in_fence);
		if (ret) {
			NV_PRINTK(warn, job->cli,
				  "Failed to find syncobj (-> in): handle=%d\n",
				  sync->handle);
			return ret;
		}

		ret = drm_sched_job_add_dependency(&job->base, in_fence);
		if (ret)
			return ret;
	}

	return 0;
}
176 
177 static void
178 nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
179 {
180 	int i;
181 
182 	for (i = 0; i < job->out_sync.count; i++) {
183 		struct drm_syncobj *obj = job->out_sync.objs[i];
184 		struct dma_fence_chain *chain = job->out_sync.chains[i];
185 
186 		if (obj)
187 			drm_syncobj_put(obj);
188 
189 		if (chain)
190 			dma_fence_chain_free(chain);
191 	}
192 }
193 
194 static int
195 nouveau_job_fence_attach_prepare(struct nouveau_job *job)
196 {
197 	int i, ret;
198 
199 	for (i = 0; i < job->out_sync.count; i++) {
200 		struct drm_nouveau_sync *sync = &job->out_sync.data[i];
201 		struct drm_syncobj **pobj = &job->out_sync.objs[i];
202 		struct dma_fence_chain **pchain = &job->out_sync.chains[i];
203 		u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
204 
205 		if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
206 		    stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
207 			ret = -EINVAL;
208 			goto err_sync_cleanup;
209 		}
210 
211 		*pobj = drm_syncobj_find(job->file_priv, sync->handle);
212 		if (!*pobj) {
213 			NV_PRINTK(warn, job->cli,
214 				  "Failed to find syncobj (-> out): handle=%d\n",
215 				  sync->handle);
216 			ret = -ENOENT;
217 			goto err_sync_cleanup;
218 		}
219 
220 		if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
221 			*pchain = dma_fence_chain_alloc();
222 			if (!*pchain) {
223 				ret = -ENOMEM;
224 				goto err_sync_cleanup;
225 			}
226 		}
227 	}
228 
229 	return 0;
230 
231 err_sync_cleanup:
232 	nouveau_job_fence_attach_cleanup(job);
233 	return ret;
234 }
235 
236 static void
237 nouveau_job_fence_attach(struct nouveau_job *job)
238 {
239 	struct dma_fence *fence = job->done_fence;
240 	int i;
241 
242 	for (i = 0; i < job->out_sync.count; i++) {
243 		struct drm_nouveau_sync *sync = &job->out_sync.data[i];
244 		struct drm_syncobj **pobj = &job->out_sync.objs[i];
245 		struct dma_fence_chain **pchain = &job->out_sync.chains[i];
246 		u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
247 
248 		if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
249 			drm_syncobj_add_point(*pobj, *pchain, fence,
250 					      sync->timeline_value);
251 		} else {
252 			drm_syncobj_replace_fence(*pobj, fence);
253 		}
254 
255 		drm_syncobj_put(*pobj);
256 		*pobj = NULL;
257 		*pchain = NULL;
258 	}
259 }
260 
/* Submit @job to its scheduler entity.
 *
 * Resolves in-sync dependencies and pre-allocates everything needed to
 * attach the out-fences, then — under the entity mutex to keep submission
 * order — runs the type-specific submit() callback, arms the scheduler
 * job, attaches the done fence to the out-syncs and pushes the job to
 * the entity. For synchronous jobs (job->sync) it additionally waits for
 * the done fence before returning.
 *
 * Returns 0 on success or a negative error code; on failure the job
 * state is set to NOUVEAU_JOB_SUBMIT_FAILED.
 */
int
nouveau_job_submit(struct nouveau_job *job)
{
	struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
	struct dma_fence *done_fence = NULL;
	int ret;

	ret = nouveau_job_add_deps(job);
	if (ret)
		goto err;

	/* Take syncobj refs / alloc chain nodes now, so fence attach
	 * after arming cannot fail.
	 */
	ret = nouveau_job_fence_attach_prepare(job);
	if (ret)
		goto err;

	/* Make sure the job appears on the sched_entity's queue in the same
	 * order as it was submitted.
	 */
	mutex_lock(&entity->mutex);

	/* Guarantee we won't fail after the submit() callback returned
	 * successfully.
	 */
	if (job->ops->submit) {
		ret = job->ops->submit(job);
		if (ret)
			goto err_cleanup;
	}

	/* Arming creates the scheduler fences; done_fence tracks the
	 * "finished" fence for out-sync attachment and the sync wait.
	 */
	drm_sched_job_arm(&job->base);
	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
	if (job->sync)
		done_fence = dma_fence_get(job->done_fence);

	if (job->ops->armed_submit)
		job->ops->armed_submit(job);

	nouveau_job_fence_attach(job);

	/* Set job state before pushing the job to the scheduler,
	 * such that we do not overwrite the job state set in run().
	 */
	job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;

	drm_sched_entity_push_job(&job->base);

	mutex_unlock(&entity->mutex);

	/* Synchronous submission: block (interruptibly) until done. */
	if (done_fence) {
		dma_fence_wait(done_fence, true);
		dma_fence_put(done_fence);
	}

	return 0;

err_cleanup:
	mutex_unlock(&entity->mutex);
	nouveau_job_fence_attach_cleanup(job);
err:
	job->state = NOUVEAU_JOB_SUBMIT_FAILED;
	return ret;
}
323 
/* Queue @work on the entity's scheduler workqueue.
 *
 * Returns false if @work was already pending on a workqueue, true
 * otherwise (queue_work() semantics).
 */
bool
nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
			   struct work_struct *work)
{
	return queue_work(entity->sched_wq, work);
}
330 
331 static struct dma_fence *
332 nouveau_job_run(struct nouveau_job *job)
333 {
334 	struct dma_fence *fence;
335 
336 	fence = job->ops->run(job);
337 	if (IS_ERR(fence))
338 		job->state = NOUVEAU_JOB_RUN_FAILED;
339 	else
340 		job->state = NOUVEAU_JOB_RUN_SUCCESS;
341 
342 	return fence;
343 }
344 
/* DRM scheduler run_job callback: unwrap the container and dispatch. */
static struct dma_fence *
nouveau_sched_run_job(struct drm_sched_job *sched_job)
{
	return nouveau_job_run(to_nouveau_job(sched_job));
}
352 
353 static enum drm_gpu_sched_stat
354 nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
355 {
356 	struct nouveau_job *job = to_nouveau_job(sched_job);
357 
358 	NV_PRINTK(warn, job->cli, "Job timed out.\n");
359 
360 	if (job->ops->timeout)
361 		return job->ops->timeout(job);
362 
363 	return DRM_GPU_SCHED_STAT_ENODEV;
364 }
365 
/* DRM scheduler free_job callback: tear the job down once it retires. */
static void
nouveau_sched_free_job(struct drm_sched_job *sched_job)
{
	nouveau_job_fini(to_nouveau_job(sched_job));
}
373 
/* Initialize a nouveau scheduler entity on top of a DRM scheduler
 * entity: set up the submission-order mutex, the pending-job list with
 * its lock and wait queue, remember the shared scheduler workqueue, and
 * register the entity with @sched at normal priority.
 *
 * Returns the result of drm_sched_entity_init() (0 on success, negative
 * error code on failure).
 */
int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
			      struct drm_gpu_scheduler *sched,
			      struct workqueue_struct *sched_wq)
{
	mutex_init(&entity->mutex);
	spin_lock_init(&entity->job.list.lock);
	INIT_LIST_HEAD(&entity->job.list.head);
	init_waitqueue_head(&entity->job.wq);

	entity->sched_wq = sched_wq;
	return drm_sched_entity_init(&entity->base,
				     DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}
388 
/* Tear down a nouveau scheduler entity; drm_sched_entity_destroy()
 * flushes outstanding jobs before destroying the base entity.
 */
void
nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
{
	drm_sched_entity_destroy(&entity->base);
}
394 
/* DRM GPU scheduler backend callbacks for nouveau jobs. */
static const struct drm_sched_backend_ops nouveau_sched_ops = {
	.run_job = nouveau_sched_run_job,
	.timedout_job = nouveau_sched_timedout_job,
	.free_job = nouveau_sched_free_job,
};
400 
401 int nouveau_sched_init(struct nouveau_drm *drm)
402 {
403 	struct drm_gpu_scheduler *sched = &drm->sched;
404 	long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
405 
406 	drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
407 	if (!drm->sched_wq)
408 		return -ENOMEM;
409 
410 	return drm_sched_init(sched, &nouveau_sched_ops,
411 			      NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
412 			      NULL, NULL, "nouveau_sched", drm->dev->dev);
413 }
414 
/* Tear down the per-device scheduler state created by
 * nouveau_sched_init().
 *
 * NOTE(review): the workqueue is destroyed before drm_sched_fini();
 * destroy_workqueue() flushes pending work, but verify no work item
 * queued on sched_wq can re-arm after the flush while the scheduler is
 * still live — otherwise the order may need to be swapped.
 */
void nouveau_sched_fini(struct nouveau_drm *drm)
{
	destroy_workqueue(drm->sched_wq);
	drm_sched_fini(&drm->sched);
}
420