xref: /openbmc/linux/drivers/accel/ivpu/ivpu_job.c (revision 5a46e490)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <drm/drm_file.h>

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

#define CMD_BUF_IDX	     0
#define JOB_ID_JOB_MASK	     GENMASK(7, 0)
#define JOB_ID_CONTEXT_MASK  GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535

static unsigned int ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, uint, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");

static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
	ivpu_hw_reg_db_set(vdev, cmdq->db_id);
}

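/*
 * Allocate a command queue for @engine: the host-side descriptor plus a 4 KB
 * write-combined buffer holding the job queue header and entries consumed by
 * the VPU firmware. The doorbell id is derived from the context id and engine
 * so that each (context, engine) pair gets a unique doorbell.
 */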
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct vpu_job_queue_header *jobq_header;
	struct ivpu_cmdq *cmdq;

	cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return NULL;

	cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
	if (!cmdq->mem)
		goto cmdq_free;

	cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
	cmdq->entry_count = (u32)((cmdq->mem->base.size - sizeof(struct vpu_job_queue_header)) /
				  sizeof(struct vpu_job_queue_entry));

	cmdq->jobq = (struct vpu_job_queue *)cmdq->mem->kvaddr;
	jobq_header = &cmdq->jobq->header;
	jobq_header->engine_idx = engine;
	jobq_header->head = 0;
	jobq_header->tail = 0;
	wmb(); /* Flush WC buffer for jobq->header */

	return cmdq;

cmdq_free:
	kfree(cmdq);
	return NULL;
}

static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	if (!cmdq)
		return;

	ivpu_bo_free_internal(cmdq->mem);
	kfree(cmdq);
}

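/*
 * Return the command queue for @engine, allocating it on first use and
 * (re)registering its doorbell with the firmware if that has not been done
 * since the last reset. Must be called with file_priv->lock held.
 */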
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
	int ret;

	lockdep_assert_held(&file_priv->lock);

	if (!cmdq) {
		cmdq = ivpu_cmdq_alloc(file_priv, engine);
		if (!cmdq)
			return NULL;
		file_priv->cmdq[engine] = cmdq;
	}

	if (cmdq->db_registered)
		return cmdq;

	ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
				   cmdq->mem->vpu_addr, cmdq->mem->base.size);
	if (ret)
		return NULL;

	cmdq->db_registered = true;

	return cmdq;
}

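/*
 * Tear down the command queue for @engine: unregister its doorbell from the
 * firmware (if registered) and free its backing buffer. Caller must hold
 * file_priv->lock.
 */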
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];

	lockdep_assert_held(&file_priv->lock);

	if (cmdq) {
		file_priv->cmdq[engine] = NULL;
		if (cmdq->db_registered)
			ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);

		ivpu_cmdq_free(file_priv, cmdq);
	}
}

void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
{
	int i;

	mutex_lock(&file_priv->lock);

	for (i = 0; i < IVPU_NUM_ENGINES; i++)
		ivpu_cmdq_release_locked(file_priv, i);

	mutex_unlock(&file_priv->lock);
}

/*
 * Mark the doorbell as unregistered and reset the job queue pointers.
 * This function needs to be called when the VPU hardware is restarted
 * and the FW loses job queue state. The next time the job queue is used,
 * it will be registered again.
 */
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];

	lockdep_assert_held(&file_priv->lock);

	if (cmdq) {
		cmdq->db_registered = false;
		cmdq->jobq->header.head = 0;
		cmdq->jobq->header.tail = 0;
		wmb(); /* Flush WC buffer for jobq header */
	}
}

static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
{
	int i;

	mutex_lock(&file_priv->lock);

	for (i = 0; i < IVPU_NUM_ENGINES; i++)
		ivpu_cmdq_reset_locked(file_priv, i);

	mutex_unlock(&file_priv->lock);
}

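/*
 * Reset the command queues of every open context, to be called after the VPU
 * has been restarted and the firmware has lost its job queue state (see
 * ivpu_cmdq_reset_locked() above).
 */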
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		ivpu_cmdq_reset_all(file_priv);

		ivpu_file_priv_put(&file_priv);
	}
}

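/*
 * Append a job to the ring buffer shared with the firmware: fail with -EBUSY
 * if the queue is full, otherwise fill the entry at the current tail, then
 * publish it by advancing the tail and flushing the write-combined buffer.
 */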
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	struct vpu_job_queue_header *header = &cmdq->jobq->header;
	struct vpu_job_queue_entry *entry;
	u32 tail = READ_ONCE(header->tail);
	u32 next_entry = (tail + 1) % cmdq->entry_count;

	/* Check if there is space left in job queue */
	if (next_entry == header->head) {
		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
			 job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
		return -EBUSY;
	}

	entry = &cmdq->jobq->job[tail];
	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
	entry->job_id = job->job_id;
	entry->flags = 0;
	wmb(); /* Ensure that tail is updated after filling entry */
	header->tail = next_entry;
	wmb(); /* Flush WC buffer for jobq header */

	return 0;
}

struct ivpu_fence {
	struct dma_fence base;
	spinlock_t lock; /* protects base */
	struct ivpu_device *vdev;
};

static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
{
	return container_of(fence, struct ivpu_fence, base);
}

static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
{
	struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);

	return dev_name(ivpu_fence->vdev->drm.dev);
}

static const struct dma_fence_ops ivpu_fence_ops = {
	.get_driver_name = ivpu_fence_get_driver_name,
	.get_timeline_name = ivpu_fence_get_timeline_name,
};

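/*
 * Allocate a dma_fence on its own fence context with sequence number 1.
 * It is signalled from ivpu_job_done() once the job completes or is aborted.
 */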
static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
{
	struct ivpu_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->vdev = vdev;
	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);

	return &fence->base;
}

static void job_get(struct ivpu_job *job, struct ivpu_job **link)
{
	struct ivpu_device *vdev = job->vdev;

	kref_get(&job->ref);
	*link = job;

	ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
}

static void job_release(struct kref *ref)
{
	struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
	struct ivpu_device *vdev = job->vdev;
	u32 i;

	for (i = 0; i < job->bo_count; i++)
		if (job->bos[i])
			drm_gem_object_put(&job->bos[i]->base);

	dma_fence_put(job->done_fence);
	ivpu_file_priv_put(&job->file_priv);

	ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
	kfree(job);

	/* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
	ivpu_rpm_put(vdev);
}

static void job_put(struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;

	ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
	kref_put(&job->ref, job_release);
}

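/*
 * Allocate a reference-counted job with room for @bo_count buffer pointers.
 * Takes a runtime PM reference so the VPU stays powered, creates the done
 * fence and takes a reference on the file context; all of these are dropped
 * again in job_release().
 */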
static struct ivpu_job *
ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_job *job;
	size_t buf_size;
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return NULL;

	buf_size = sizeof(*job) + bo_count * sizeof(struct ivpu_bo *);
	job = kzalloc(buf_size, GFP_KERNEL);
	if (!job)
		goto err_rpm_put;

	kref_init(&job->ref);

	job->vdev = vdev;
	job->engine_idx = engine_idx;
	job->bo_count = bo_count;
	job->done_fence = ivpu_fence_create(vdev);
	if (!job->done_fence) {
		ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
		goto err_free_job;
	}

	job->file_priv = ivpu_file_priv_get(file_priv);

	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d\n", file_priv->ctx.id, job->engine_idx);

	return job;

err_free_job:
	kfree(job);
err_rpm_put:
	ivpu_rpm_put(vdev);
	return NULL;
}

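/*
 * Complete a submitted job: remove it from the submitted-jobs xarray, store
 * the status in the command buffer BO (forcing VPU_JSM_STATUS_ABORTED if the
 * context has seen MMU faults), signal the done fence and drop the reference
 * taken at submission time.
 */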
static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
	struct ivpu_job *job;

	job = xa_erase(&vdev->submitted_jobs_xa, job_id);
	if (!job)
		return -ENOENT;

	if (job->file_priv->has_mmu_faults)
		job_status = VPU_JSM_STATUS_ABORTED;

	job->bos[CMD_BUF_IDX]->job_status = job_status;
	dma_fence_signal(job->done_fence);

	ivpu_dbg(vdev, JOB, "Job complete:  id %3u ctx %2d engine %d status 0x%x\n",
		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);

	job_put(job);
	return 0;
}

static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg)
{
	struct vpu_ipc_msg_payload_job_done *payload;
	struct vpu_jsm_msg *job_ret_msg = msg;
	int ret;

	payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload;

	ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
	if (ret)
		ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret);
}

void ivpu_jobs_abort_all(struct ivpu_device *vdev)
{
	struct ivpu_job *job;
	unsigned long id;

	xa_for_each(&vdev->submitted_jobs_xa, id, job)
		ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
}

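/*
 * Hand a prepared job to the hardware: acquire the per-engine command queue,
 * allocate a job id that encodes the context id in its upper bits, push the
 * job onto the queue and ring the doorbell. In IVPU_TEST_MODE_NULL_HW the
 * job is completed immediately without involving the firmware.
 */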
static int ivpu_direct_job_submission(struct ivpu_job *job)
{
	struct ivpu_file_priv *file_priv = job->file_priv;
	struct ivpu_device *vdev = job->vdev;
	struct xa_limit job_id_range;
	struct ivpu_cmdq *cmdq;
	int ret;

	mutex_lock(&file_priv->lock);

	cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
	if (!cmdq) {
		ivpu_warn(vdev, "Failed to get job queue, ctx %d engine %d\n",
			  file_priv->ctx.id, job->engine_idx);
		ret = -EINVAL;
		goto err_unlock;
	}

	job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;

	job_get(job, &job);
	ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
		goto err_job_put;
	}

	ret = ivpu_cmdq_push_job(cmdq, job);
	if (ret)
		goto err_xa_erase;

	ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
		 job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
		 job->engine_idx, cmdq->jobq->header.tail);

	if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) {
		ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
		cmdq->jobq->header.head = cmdq->jobq->header.tail;
		wmb(); /* Flush WC buffer for jobq header */
	} else {
		ivpu_cmdq_ring_db(vdev, cmdq);
	}

	mutex_unlock(&file_priv->lock);
	return 0;

err_xa_erase:
	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_job_put:
	job_put(job);
err_unlock:
	mutex_unlock(&file_priv->lock);
	return ret;
}

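/*
 * Look up and pin all buffer handles for the job, validate the command buffer
 * (the BO at CMD_BUF_IDX must be idle and @commands_offset must fall inside
 * it), then attach the done fence to every BO's reservation object so other
 * users wait for the job to finish.
 */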
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
				u32 buf_count, u32 commands_offset)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ww_acquire_ctx acquire_ctx;
	struct ivpu_bo *bo;
	int ret;
	u32 i;

	for (i = 0; i < buf_count; i++) {
		struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);

		if (!obj)
			return -ENOENT;

		job->bos[i] = to_ivpu_bo(obj);

		ret = ivpu_bo_pin(job->bos[i]);
		if (ret)
			return ret;
	}

	bo = job->bos[CMD_BUF_IDX];
	if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) {
		ivpu_warn(vdev, "Buffer is already in use\n");
		return -EBUSY;
	}

	if (commands_offset >= bo->base.size) {
		ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
		return -EINVAL;
	}

	job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;

	ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
					&acquire_ctx);
	if (ret) {
		ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
		return ret;
	}

	for (i = 0; i < buf_count; i++) {
		ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
		if (ret) {
			ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
			goto unlock_reservations;
		}
	}

	for (i = 0; i < buf_count; i++)
		dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);

unlock_reservations:
	drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);

	wmb(); /* Flush write combining buffers */

	return ret;
}

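/*
 * DRM_IOCTL_IVPU_SUBMIT handler. Validates the drm_ivpu_submit parameters
 * (engine, buffer count, 8-byte aligned commands_offset), copies the buffer
 * handle array from user space, builds a job from it and submits the job
 * directly to the hardware queue.
 */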
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	int ret = 0;
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_submit *params = data;
	struct ivpu_job *job;
	u32 *buf_handles;

	if (params->engine > DRM_IVPU_ENGINE_COPY)
		return -EINVAL;

	if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
		return -EINVAL;

	if (!IS_ALIGNED(params->commands_offset, 8))
		return -EINVAL;

	if (!file_priv->ctx.id)
		return -EINVAL;

	if (file_priv->has_mmu_faults)
		return -EBADFD;

	buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
	if (!buf_handles)
		return -ENOMEM;

	ret = copy_from_user(buf_handles,
			     (void __user *)params->buffers_ptr,
			     params->buffer_count * sizeof(u32));
	if (ret) {
		ret = -EFAULT;
		goto free_handles;
	}

	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
		 file_priv->ctx.id, params->buffer_count);

	job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
	if (!job) {
		ivpu_err(vdev, "Failed to create job\n");
		ret = -ENOMEM;
		goto free_handles;
	}

	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
					      params->commands_offset);
	if (ret) {
		ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
		goto job_put;
	}

	ret = ivpu_direct_job_submission(job);
	if (ret) {
		dma_fence_signal(job->done_fence);
		ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
	}

job_put:
	job_put(job);
free_handles:
	kfree(buf_handles);

	return ret;
}

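/*
 * Kernel thread that consumes JOB_RET IPC messages from the firmware and
 * completes the matching jobs. A receive timeout while jobs are still
 * outstanding is treated as a device hang (TDR) and triggers recovery.
 */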
static int ivpu_job_done_thread(void *arg)
{
	struct ivpu_device *vdev = (struct ivpu_device *)arg;
	struct ivpu_ipc_consumer cons;
	struct vpu_jsm_msg jsm_msg;
	bool jobs_submitted;
	unsigned int timeout;
	int ret;

	ivpu_dbg(vdev, JOB, "Started %s\n", __func__);

	ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);

	while (!kthread_should_stop()) {
		timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
		jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa);
		ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
		if (!ret) {
			ivpu_job_done_message(vdev, &jsm_msg);
		} else if (ret == -ETIMEDOUT) {
			if (jobs_submitted && !xa_empty(&vdev->submitted_jobs_xa)) {
				ivpu_err(vdev, "TDR detected, timeout %d ms", timeout);
				ivpu_hw_diagnose_failure(vdev);
				ivpu_pm_schedule_recovery(vdev);
			}
		}
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	ivpu_jobs_abort_all(vdev);

	ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__);
	return 0;
}

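/*
 * Start the job completion thread and take an extra task reference so the
 * thread can be safely stopped and put in ivpu_job_done_thread_fini().
 */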
int ivpu_job_done_thread_init(struct ivpu_device *vdev)
{
	struct task_struct *thread;

	thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread");
	if (IS_ERR(thread)) {
		ivpu_err(vdev, "Failed to start job completion thread\n");
		return -EIO;
	}

	get_task_struct(thread);
	wake_up_process(thread);

	vdev->job_done_thread = thread;

	return 0;
}

void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
{
	kthread_stop(vdev->job_done_thread);
	put_task_struct(vdev->job_done_thread);
}