// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define JOB_TIMEOUT_MS 500

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;
	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
	int irq;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

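/* Allocate a done fence on the slot's timeline. The unlocked seqno
 * increment is safe here because fences are only created from run_job(),
 * and the scheduler serializes run_job() calls per queue.
 */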
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs.
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler-only jobs and h/w with
	 * multiple (2) coherent core groups.
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}

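/* On HW with jobchain disambiguation, two queued jobs on a slot can be
 * targeted individually: the job chain flag is derived from the done-fence
 * seqno parity, so consecutive jobs on a slot always carry opposite flags
 * (see the JS_COMMAND_HARD_STOP_0/1 selection in panfrost_job_close()).
 */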
static u32
panfrost_get_job_chain_flag(const struct panfrost_job *job)
{
	struct panfrost_fence *f = to_panfrost_fence(job->done_fence);

	if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
		return 0;

	return (f->seqno & 1) ? JS_CONFIG_JOB_CHAIN_FLAG : 0;
}

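/* pfdev->jobs[slot] is a two-entry FIFO shadowing the HW job slot: entry 0
 * is the oldest queued job (running or about to run), entry 1 the job
 * latched in the _NEXT registers. Both helpers below must be called with
 * pfdev->js->job_lock held.
 */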
static struct panfrost_job *
panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
{
	struct panfrost_job *job = pfdev->jobs[slot][0];

	WARN_ON(!job);
	pfdev->jobs[slot][0] = pfdev->jobs[slot][1];
	pfdev->jobs[slot][1] = NULL;

	return job;
}

static unsigned int
panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
		     struct panfrost_job *job)
{
	if (WARN_ON(!job))
		return 0;

	if (!pfdev->jobs[slot][0]) {
		pfdev->jobs[slot][0] = job;
		return 0;
	}

	WARN_ON(pfdev->jobs[slot][1]);
	pfdev->jobs[slot][1] = job;
	WARN_ON(panfrost_get_job_chain_flag(job) ==
		panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));
	return 1;
}

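/* Program the job slot registers and kick off execution. This takes a PM
 * runtime reference and an address space slot; both are released from the
 * IRQ path once the job completes or fails.
 */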
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	unsigned int subslot;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
		return;

	cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
	job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
		panfrost_get_job_chain_flag(job);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */

	spin_lock(&pfdev->js->job_lock);
	subslot = panfrost_enqueue_job(pfdev, js, job);
	/* Don't queue the job if a reset is in progress */
	if (!atomic_read(&pfdev->reset.pending)) {
		job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
		dev_dbg(pfdev->dev,
			"JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d",
			job, js, subslot, jc_head, cfg & 0xf);
	}
	spin_unlock(&pfdev->js->job_lock);
}

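/* Implicit synchronization: pull the fences already attached to each BO's
 * reservation object into the scheduler job's dependency list, so the job
 * only runs once prior work on those buffers has completed.
 */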
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct drm_sched_job *job)
{
	int i, ret;

	for (i = 0; i < bo_count; i++) {
		/* panfrost always uses write mode in its current uapi */
		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
							      true);
		if (ret)
			return ret;
	}

	return 0;
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

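/* Queue a job to the scheduler. A sketch of the expected usage from the
 * submit ioctl path (the exact flow lives in panfrost_drv.c):
 *
 *	drm_sched_job_init(&job->base, &file_priv->sched_entity[slot], NULL);
 *	...look up BOs and resolve in-fences...
 *	ret = panfrost_job_push(job);
 *	panfrost_job_put(job);	// drop the submit path's reference
 */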
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret)
		return ret;

	mutex_lock(&pfdev->sched_lock);
	drm_sched_job_arm(&job->base);

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
					     &job->base);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	kref_get(&job->refcount); /* put by scheduler job completion */

	drm_sched_entity_push_job(&job->base);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bos[i]);

		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

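/* Scheduler run_job() callback: create the HW done fence, submit the job
 * chain to its slot, and return the fence so the scheduler can wait for
 * completion.
 */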
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	/* Nothing to execute: can happen if the job has finished while
	 * we were resetting the GPU.
	 */
	if (!job->jc)
		return NULL;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return fence;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		irq_mask |= MK_JS_MASK(j);

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

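/* Handle a job slot event that isn't a plain completion: decode JS_STATUS,
 * keep the updated job head around so soft-stopped jobs can be resumed,
 * flag terminated/faulted jobs with a fence error, and trigger a scheduler
 * fault when the exception requires a GPU reset. Called with
 * pfdev->js->job_lock held.
 */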
static void panfrost_job_handle_err(struct panfrost_device *pfdev,
				    struct panfrost_job *job,
				    unsigned int js)
{
	u32 js_status = job_read(pfdev, JS_STATUS(js));
	const char *exception_name = panfrost_exception_name(js_status);
	bool signal_fence = true;

	if (!panfrost_exception_is_fault(js_status)) {
		dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
			js, exception_name,
			job_read(pfdev, JS_HEAD_LO(js)),
			job_read(pfdev, JS_TAIL_LO(js)));
	} else {
		dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
			js, exception_name,
			job_read(pfdev, JS_HEAD_LO(js)),
			job_read(pfdev, JS_TAIL_LO(js)));
	}

	if (js_status == DRM_PANFROST_EXCEPTION_STOPPED) {
		/* Update the job head so we can resume */
		job->jc = job_read(pfdev, JS_TAIL_LO(js)) |
			  ((u64)job_read(pfdev, JS_TAIL_HI(js)) << 32);

		/* The job will be resumed, don't signal the fence */
		signal_fence = false;
	} else if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED) {
		/* Job has been hard-stopped, flag it as canceled */
		dma_fence_set_error(job->done_fence, -ECANCELED);
		job->jc = 0;
	} else if (panfrost_exception_is_fault(js_status)) {
		/* We might want to provide finer-grained error code based on
		 * the exception type, but unconditionally setting it to EINVAL
		 * is good enough for now.
		 */
		dma_fence_set_error(job->done_fence, -EINVAL);
		job->jc = 0;
	}

	panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
	panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

	if (signal_fence)
		dma_fence_signal_locked(job->done_fence);

	pm_runtime_put_autosuspend(pfdev->dev);

	if (panfrost_exception_needs_reset(pfdev, js_status)) {
		atomic_set(&pfdev->reset.pending, 1);
		drm_sched_fault(&pfdev->js->queue[js].sched);
	}
}

static void panfrost_job_handle_done(struct panfrost_device *pfdev,
				     struct panfrost_job *job)
{
	/* Set ->jc to 0 to avoid re-submitting an already finished job (can
	 * happen when we receive the DONE interrupt while doing a GPU reset).
	 */
	job->jc = 0;
	panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
	panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

	dma_fence_signal_locked(job->done_fence);
	pm_runtime_put_autosuspend(pfdev->dev);
}

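/* Core of the job IRQ handling, called with pfdev->js->job_lock held.
 * Three passes: collect all DONE/ERR jobs while re-reading RAWSTAT until it
 * is stable, handle the dequeued jobs, and finally deal with jobs that were
 * sitting in the _NEXT slot behind a failed job (cancel or restart them).
 */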
static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
{
	struct panfrost_job *done[NUM_JOB_SLOTS][2] = {};
	struct panfrost_job *failed[NUM_JOB_SLOTS] = {};
	u32 js_state = 0, js_events = 0;
	unsigned int i, j;

	/* First we collect all failed/done jobs. */
	while (status) {
		u32 js_state_mask = 0;

		for (j = 0; j < NUM_JOB_SLOTS; j++) {
			if (status & MK_JS_MASK(j))
				js_state_mask |= MK_JS_MASK(j);

			if (status & JOB_INT_MASK_DONE(j)) {
				if (done[j][0])
					done[j][1] = panfrost_dequeue_job(pfdev, j);
				else
					done[j][0] = panfrost_dequeue_job(pfdev, j);
			}

			if (status & JOB_INT_MASK_ERR(j)) {
				/* Cancel the next submission. Will be submitted
				 * after we're done handling this failure if
				 * there's no reset pending.
				 */
				job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
				failed[j] = panfrost_dequeue_job(pfdev, j);
			}
		}

		/* JS_STATE is sampled when JOB_INT_CLEAR is written.
		 * For each BIT(slot) or BIT(slot + 16) bit written to
		 * JOB_INT_CLEAR, the corresponding bits in JS_STATE
		 * (BIT(slot) and BIT(slot + 16)) are updated, but this
		 * is racy. If we only have one job done at the time we
		 * read JOB_INT_RAWSTAT but the second job fails before we
		 * clear the status, we end up with a status containing
		 * only the DONE bit and consider both jobs as DONE since
		 * JS_STATE reports both NEXT and CURRENT as inactive.
		 * To prevent that, let's repeat this clear+read sequence
		 * until status is 0.
		 */
		job_write(pfdev, JOB_INT_CLEAR, status);
		js_state &= ~js_state_mask;
		js_state |= job_read(pfdev, JOB_INT_JS_STATE) & js_state_mask;
		js_events |= status;
		status = job_read(pfdev, JOB_INT_RAWSTAT);
	}

	/* Then we handle the dequeued jobs. */
	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		if (!(js_events & MK_JS_MASK(j)))
			continue;

		if (failed[j]) {
			panfrost_job_handle_err(pfdev, failed[j], j);
		} else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {
			/* When the current job doesn't fail, the JM dequeues
			 * the next job without waiting for an ACK, which means
			 * we can have 2 jobs dequeued and only catch the
			 * interrupt when the second one is done. If both slots
			 * are inactive, but one job remains in pfdev->jobs[j],
			 * consider it done. Of course that doesn't apply if a
			 * failure happened since we cancelled execution of the
			 * job in _NEXT (see above).
			 */
			if (WARN_ON(!done[j][0]))
				done[j][0] = panfrost_dequeue_job(pfdev, j);
			else
				done[j][1] = panfrost_dequeue_job(pfdev, j);
		}

		for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++)
			panfrost_job_handle_done(pfdev, done[j][i]);
	}

	/* And finally we requeue jobs that were waiting in the second slot
	 * and have been stopped if we detected a failure on the first slot.
	 */
	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		if (!(js_events & MK_JS_MASK(j)))
			continue;

		if (!failed[j] || !pfdev->jobs[j][0])
			continue;

		if (pfdev->jobs[j][0]->jc == 0) {
			/* The job was cancelled, signal the fence now */
			struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j);

			dma_fence_set_error(canceled->done_fence, -ECANCELED);
			panfrost_job_handle_done(pfdev, canceled);
		} else if (!atomic_read(&pfdev->reset.pending)) {
			/* Requeue the job we removed if no reset is pending */
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START);
		}
	}
}

static void panfrost_job_handle_irqs(struct panfrost_device *pfdev)
{
	u32 status = job_read(pfdev, JOB_INT_RAWSTAT);

	while (status) {
		pm_runtime_mark_last_busy(pfdev->dev);

		spin_lock(&pfdev->js->job_lock);
		panfrost_job_handle_irq(pfdev, status);
		spin_unlock(&pfdev->js->job_lock);
		status = job_read(pfdev, JOB_INT_RAWSTAT);
	}
}

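/* Helper for the soft-stop poll in panfrost_reset(): returns the JS_STATE
 * bits of the slots we still consider active, pruning slots that raised a
 * new interrupt in the meantime (those are drained by
 * panfrost_job_handle_irqs() before the reset proceeds).
 */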
static u32 panfrost_active_slots(struct panfrost_device *pfdev,
				 u32 *js_state_mask, u32 js_state)
{
	u32 rawstat;

	if (!(js_state & *js_state_mask))
		return 0;

	rawstat = job_read(pfdev, JOB_INT_RAWSTAT);
	if (rawstat) {
		unsigned int i;

		for (i = 0; i < NUM_JOB_SLOTS; i++) {
			if (rawstat & MK_JS_MASK(i))
				*js_state_mask &= ~MK_JS_MASK(i);
		}
	}

	return js_state & *js_state_mask;
}

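/* Full GPU reset sequence: stop the schedulers, mask the job IRQ,
 * soft-stop anything still running, drain pending interrupts, rebalance
 * PM/devfreq refcounts for stuck jobs, reset the GPU, then resubmit
 * pending jobs and restart the schedulers.
 */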
static void
panfrost_reset(struct panfrost_device *pfdev,
	       struct drm_sched_job *bad)
{
	u32 js_state, js_state_mask = 0xffffffff;
	unsigned int i, j;
	bool cookie;
	int ret;

	if (!atomic_read(&pfdev->reset.pending))
		return;

	/* Stop the schedulers.
	 *
	 * FIXME: We temporarily get out of the dma_fence_signalling section
	 * because the cleanup path generates lockdep splats when taking locks
	 * to release job resources. We should rework the code to follow this
	 * pattern:
	 *
	 *	try_lock
	 *	if (locked)
	 *		release
	 *	else
	 *		schedule_work_to_release_later
	 */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_stop(&pfdev->js->queue[i].sched, bad);

	cookie = dma_fence_begin_signalling();

	if (bad)
		drm_sched_increase_karma(bad);

	/* Mask job interrupts and synchronize to make sure we won't be
	 * interrupted during our reset.
	 */
	job_write(pfdev, JOB_INT_MASK, 0);
	synchronize_irq(pfdev->js->irq);

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* Cancel the next job and soft-stop the running job. */
		job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
		job_write(pfdev, JS_COMMAND(i), JS_COMMAND_SOFT_STOP);
	}

	/* Wait at most 10ms for soft-stops to complete. */
	ret = readl_poll_timeout(pfdev->iomem + JOB_INT_JS_STATE, js_state,
				 !panfrost_active_slots(pfdev, &js_state_mask, js_state),
				 10, 10000);

	if (ret)
		dev_err(pfdev->dev, "Soft-stop failed\n");

	/* Handle the remaining interrupts before we reset. */
	panfrost_job_handle_irqs(pfdev);

	/* Remaining interrupts have been handled, but we might still have
	 * stuck jobs. Let's make sure the PM counters stay balanced by
	 * manually calling pm_runtime_put_noidle() and
	 * panfrost_devfreq_record_idle() for each stuck job.
	 */
	spin_lock(&pfdev->js->job_lock);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {
			pm_runtime_put_noidle(pfdev->dev);
			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
		}
	}
	memset(pfdev->jobs, 0, sizeof(pfdev->jobs));
	spin_unlock(&pfdev->js->job_lock);

	/* Proceed with reset now. */
	panfrost_device_reset(pfdev);

	/* panfrost_device_reset() unmasks job interrupts, but we want to
	 * keep them masked a bit longer.
	 */
	job_write(pfdev, JOB_INT_MASK, 0);

	/* GPU has been reset, we can clear the reset pending bit. */
	atomic_set(&pfdev->reset.pending, 0);

	/* Now resubmit jobs that were previously queued but didn't have a
	 * chance to finish.
	 * FIXME: We temporarily get out of the DMA fence signalling section
	 * while resubmitting jobs because the job submission logic will
	 * allocate memory with the GFP_KERNEL flag which can trigger memory
	 * reclaim and exposes a lock ordering issue.
	 */
	dma_fence_end_signalling(cookie);
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
	cookie = dma_fence_begin_signalling();

	/* Restart the schedulers */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	/* Re-enable job interrupts now that everything has been restarted. */
	job_write(pfdev, JOB_INT_MASK,
		  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
		  GENMASK(NUM_JOB_SLOTS - 1, 0));

	dma_fence_end_signalling(cookie);
}

static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
						     *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return DRM_GPU_SCHED_STAT_NOMINAL;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	atomic_set(&pfdev->reset.pending, 1);
	panfrost_reset(pfdev, sched_job);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void panfrost_reset_work(struct work_struct *work)
{
	struct panfrost_device *pfdev;

	pfdev = container_of(work, struct panfrost_device, reset.work);
	panfrost_reset(pfdev, NULL);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};

static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	panfrost_job_handle_irqs(pfdev);
	job_write(pfdev, JOB_INT_MASK,
		  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
		  GENMASK(NUM_JOB_SLOTS - 1, 0));
	return IRQ_HANDLED;
}

static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);

	if (!status)
		return IRQ_NONE;

	job_write(pfdev, JOB_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

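/* One scheduler instance is created per job slot. They all share a single
 * ordered workqueue for timeout handling, so scheduler timeouts (and thus
 * panfrost_reset()) are serialized across slots.
 */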
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	unsigned int nentries = 2;
	int ret, j;

	/* All GPUs have two entries per queue, but without jobchain
	 * disambiguation stopping the right job in the close path is tricky,
	 * so let's just advertise one entry in that case.
	 */
	if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
		nentries = 1;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
	spin_lock_init(&js->job_lock);

	js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (js->irq <= 0)
		return -ENODEV;

	ret = devm_request_threaded_irq(pfdev->dev, js->irq,
					panfrost_job_irq_handler,
					panfrost_job_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-job",
					pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
	if (!pfdev->reset.wq)
		return -ENOMEM;

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     nentries, 0,
				     msecs_to_jiffies(JOB_TIMEOUT_MS),
				     pfdev->reset.wq,
				     NULL, "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	destroy_workqueue(pfdev->reset.wq);
	return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);

	cancel_work_sync(&pfdev->reset.work);
	destroy_workqueue(pfdev->reset.wq);
}

int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

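/* Called when a file is closed: tear down the scheduler entities, then
 * hard-stop any of this context's jobs still queued in the HW slots so
 * they don't keep running against a dying address space.
 */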
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);

	/* Kill in-flight jobs */
	spin_lock(&pfdev->js->job_lock);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
		int j;

		for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
			struct panfrost_job *job = pfdev->jobs[i][j];
			u32 cmd;

			if (!job || job->base.entity != entity)
				continue;

			if (j == 1) {
				/* Try to cancel the job before it starts */
				job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
				/* Reset the job head so it doesn't get restarted if
				 * the job in the first slot failed.
				 */
				job->jc = 0;
			}

			if (panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
				cmd = panfrost_get_job_chain_flag(job) ?
				      JS_COMMAND_HARD_STOP_1 :
				      JS_COMMAND_HARD_STOP_0;
			} else {
				cmd = JS_COMMAND_HARD_STOP;
			}

			job_write(pfdev, JS_COMMAND(i), cmd);
		}
	}
	spin_unlock(&pfdev->js->job_lock);
}

int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}