// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs.
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

	/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler only jobs and h/w with
	 * multiple (2) coherent core groups
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
		pm_runtime_put_sync_autosuspend(pfdev->dev);
		return;
	}

	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
	panfrost_devfreq_record_busy(pfdev);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
		job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}

static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++)
			panfrost_gem_mapping_put(job->mappings[i]);
		kvfree(job->mappings);
	}

	if (job->bos) {
		struct panfrost_gem_object *bo;

		for (i = 0; i < job->bo_count; i++) {
			bo = to_panfrost_bo(job->bos[i]);
			drm_gem_object_put_unlocked(job->bos[i]);
		}

		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}

static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		irq_mask |= MK_JS_MASK(j);
	}

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);
	unsigned long flags;
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	if (!mutex_trylock(&pfdev->reset_lock))
		return;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;

		drm_sched_stop(sched, sched_job);
		if (js != i)
			/* Ensure any timeouts on other slots have finished */
			cancel_delayed_work_sync(&sched->work_tdr);
	}

	drm_sched_increase_karma(sched_job);

	spin_lock_irqsave(&pfdev->js->job_lock, flags);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		if (pfdev->jobs[i]) {
			pm_runtime_put_noidle(pfdev->dev);
			pfdev->jobs[i] = NULL;
		}
	}
	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

	panfrost_devfreq_record_idle(pfdev);
	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

	/* restart scheduler after GPU is usable again */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	mutex_unlock(&pfdev->reset_lock);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};

static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job;

			spin_lock(&pfdev->js->job_lock);
			job = pfdev->jobs[j];
			/* Only NULL if job timeout occurred */
			if (job) {
				pfdev->jobs[j] = NULL;

				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
				panfrost_devfreq_record_idle(pfdev);

				dma_fence_signal_locked(job->done_fence);
				pm_runtime_put_autosuspend(pfdev->dev);
			}
			spin_unlock(&pfdev->js->job_lock);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, "job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(500),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);
}

int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	/* Check whether the hardware is idle */
	if (atomic_read(&pfdev->devfreq.busy_count))
		return false;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}