// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

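/* Accessors for the Job Manager register block; offsets are relative to the
 * GPU's MMIO base.
 */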
#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

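/* Per-slot scheduler state: each job slot runs its own DRM GPU scheduler and
 * signals fences on its own timeline (fence_context/emit_seqno).
 */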
struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

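/* Allocate a fence on the given slot's timeline. emit_seqno is only advanced
 * from run_job for that slot's scheduler, so the bare increment is safe.
 */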
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

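/* Program the shader core affinity for the next job on the given slot. */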
static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler only jobs and h/w with
	 * multiple (2) coherent core groups
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

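/* Write the job chain address and configuration to the hardware and start
 * execution on the given slot. Holds a runtime PM reference for the duration
 * of the job; it is dropped from the IRQ handler on completion or from the
 * timeout path on a reset.
 */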
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
		pm_runtime_put_sync_autosuspend(pfdev->dev);
		return;
	}

	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);

	panfrost_devfreq_record_transition(pfdev, js);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start
	 */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
		job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}

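/* Fetch each BO's exclusive reservation fence so the scheduler can wait on
 * the implicit dependencies before running the job.
 */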
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

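/* Queue a job for execution: take the BO reservations, initialize the
 * scheduler job, collect the implicit fences to wait on, push the job to its
 * entity and attach the render-done fence to every BO.
 */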
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

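/* Final kref release: drop all fence and BO references held by the job. */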
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put_unlocked(job->bos[i]);
		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

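/* Scheduler dependency callback: hand back one outstanding fence at a time,
 * explicit fences first, until none remain.
 */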
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}

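/* Scheduler run_job callback: create the hardware fence for this attempt and
 * submit the job to its slot. NULL is returned when the job is skipped (the
 * entity was already flagged with an error) or fence allocation fails.
 */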
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

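/* Clear any stale job IRQs and unmask them for all slots. */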
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		irq_mask |= MK_JS_MASK(j);

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

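/* Timeout handling: stop every slot's scheduler, drop the runtime PM
 * references of in-flight jobs, reset the GPU, then resubmit the stopped
 * jobs and restart the schedulers. Timeouts that race with completion are
 * treated as spurious.
 */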
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);
	unsigned long flags;
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	if (!mutex_trylock(&pfdev->reset_lock))
		return;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;

		drm_sched_stop(sched, sched_job);
		if (js != i)
			/* Ensure any timeouts on other slots have finished */
			cancel_delayed_work_sync(&sched->work_tdr);
	}

	drm_sched_increase_karma(sched_job);

	spin_lock_irqsave(&pfdev->js->job_lock, flags);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		if (pfdev->jobs[i]) {
			pm_runtime_put_noidle(pfdev->dev);
			pfdev->jobs[i] = NULL;
		}
	}
	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

	/* panfrost_core_dump(pfdev); */

	panfrost_devfreq_record_transition(pfdev, js);
	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

	/* restart scheduler after GPU is usable again */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	mutex_unlock(&pfdev->reset_lock);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};

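/* Job IRQ handler: for each signalling slot, report faults to the scheduler;
 * on completion, release the address space, signal the done fence and drop
 * the runtime PM reference taken at submit.
 */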
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job;

			spin_lock(&pfdev->js->job_lock);
			job = pfdev->jobs[j];
			/* Only NULL if job timeout occurred */
			if (job) {
				pfdev->jobs[j] = NULL;

				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
				panfrost_devfreq_record_transition(pfdev, j);

				dma_fence_signal_locked(job->done_fence);
				pm_runtime_put_autosuspend(pfdev->dev);
			}
			spin_unlock(&pfdev->js->job_lock);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

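/* One-time init: allocate the slot state, request the "job" interrupt and
 * create one scheduler per slot with a 500ms job timeout.
 */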
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, "job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(500),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);
}

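/* Per-file open: create one normal-priority scheduler entity per slot. */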
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_sched_rq *rq;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

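/* Idle when no slot has jobs in its hardware queue and devfreq reports every
 * slot as not busy.
 */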
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;

		/* Check whether the hardware is idle */
		if (pfdev->devfreq.slot[i].busy)
			return false;
	}

	return true;
}