// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

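/*
 * Per-queue scheduling state; the device has one of these for each of the
 * NUM_JOB_SLOTS hardware job slots, each with its own DRM GPU scheduler and
 * dma-fence timeline.
 */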
struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

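/*
 * Allocate a dma-fence on the timeline of the given job slot. The fence is
 * signaled from the job IRQ handler once the slot reports completion.
 */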
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

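/* Pick the hardware job slot for a job based on its requirement flags. */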
static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/*
	 * JS0: fragment jobs
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

123 
124 static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
125 					u32 requirements,
126 					int js)
127 {
128 	u64 affinity;
129 
130 	/*
131 	 * Use all cores for now.
132 	 * Eventually we may need to support tiler only jobs and h/w with
133 	 * multiple (2) coherent core groups
134 	 */
135 	affinity = pfdev->features.shader_present;
136 
137 	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
138 	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
139 }
140 
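/*
 * Write the job chain address, affinity and configuration to the slot's
 * _NEXT registers, then kick the slot with JS_COMMAND_START.
 */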
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	unsigned long flags;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0) {
		/* pm_runtime_get_sync() bumps the usage count even on failure */
		pm_runtime_put_noidle(pfdev->dev);
		return;
	}

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
		goto end;

	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);

	panfrost_devfreq_record_transition(pfdev, js);
	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/*
	 * Run at medium priority and clean/flush the caches on both job
	 * start and job end.
	 */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx\n",
		job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);

	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);

end:
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
}

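/*
 * Grab the exclusive (implicit) fence of each BO so the scheduler can wait
 * on it before running the job.
 */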
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

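/*
 * Queue a job on the scheduler entity matching its slot: lock the
 * reservations of all BOs, record their implicit fences as dependencies,
 * push the job, then attach the job's render-done fence back to the BOs.
 */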
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

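/*
 * Final kref release: drop every fence and BO reference held by the job,
 * then free it.
 */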
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put_unlocked(job->bos[i]);
		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

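/*
 * Scheduler dependency callback: hand back the remaining fences one per
 * call, explicit in-fences first, then the implicit per-BO fences. Once
 * all have been consumed it returns NULL and the job is ready to run.
 */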
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}

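/*
 * Scheduler run_job callback: create the slot's done fence and submit the
 * job chain to the hardware, unless the scheduler already flagged the job
 * with an error.
 */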
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		irq_mask |= MK_JS_MASK(j);

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

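/*
 * Scheduler timeout callback: stop the schedulers of all slots, reset the
 * GPU, then resubmit the in-flight jobs and restart the schedulers.
 */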
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p\n",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	mutex_lock(&pfdev->reset_lock);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* panfrost_core_dump(pfdev); */

	panfrost_devfreq_record_transition(pfdev, js);
	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

	/* Restart the schedulers after the GPU is usable again */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	mutex_unlock(&pfdev->reset_lock);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free,
};

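/*
 * Job IRQ handler: for each slot that raised an interrupt, report faults to
 * the scheduler and signal the done fence on completion.
 */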
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x\n",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job = pfdev->jobs[j];

			pfdev->jobs[j] = NULL;
			panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
			panfrost_devfreq_record_transition(pfdev, j);
			dma_fence_signal(job->done_fence);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

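/*
 * One-time init: request the "job" interrupt and create a scheduler, with a
 * 500ms job timeout, for each slot.
 */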
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, "job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq\n");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(500),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d\n", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);
}

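/* Per-file init: create one scheduler entity per slot at normal priority. */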
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_sched_rq *rq;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

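/*
 * Report whether every job slot is idle, both in software (no jobs in
 * flight on the schedulers) and in hardware (devfreq says the slot is not
 * busy).
 */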
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;

		/* Check whether the hardware is idle */
		if (pfdev->devfreq.slot[i].busy)
			return false;
	}

	return true;
}