/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"


/*
 * Power Management:
 */

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct msm_gpu *gpu)
{
	if (gpu->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_prepare(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->fast_rate)
		clk_set_rate(rate_clk, gpu->fast_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

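/*
 * Mirror of enable_clk(): every prepared clock is disabled first, the
 * rate clock is dropped back to the slow rate, and only then are the
 * clocks unprepared.
 */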
static int disable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_disable(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->slow_rate)
		clk_set_rate(rate_clk, gpu->slow_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (gpu->active_cnt++ > 0)
		return 0;

	if (WARN_ON(gpu->active_cnt <= 0))
		return -EINVAL;

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (--gpu->active_cnt > 0)
		return 0;

	if (WARN_ON(gpu->active_cnt < 0))
		return -EINVAL;

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

/*
 * Inactivity detection (for suspend):
 */

static void inactive_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
	struct drm_device *dev = gpu->dev;

	if (gpu->inactive)
		return;

	DBG("%s: inactive!\n", gpu->name);
	mutex_lock(&dev->struct_mutex);
	if (!(msm_gpu_active(gpu) || gpu->inactive)) {
		disable_axi(gpu);
		disable_clk(gpu);
		gpu->inactive = true;
	}
	mutex_unlock(&dev->struct_mutex);
}

static void inactive_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct msm_drm_private *priv = gpu->dev->dev_private;

	queue_work(priv->wq, &gpu->inactive_work);
}

/* cancel inactive timer and make sure we are awake: */
static void inactive_cancel(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	del_timer(&gpu->inactive_timer);
	if (gpu->inactive) {
		enable_clk(gpu);
		enable_axi(gpu);
		gpu->inactive = false;
	}
}

static void inactive_start(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->inactive_timer,
			round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
}

/*
 * Hangcheck detection for locked gpu:
 */

static void retire_submits(struct msm_gpu *gpu);

static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit;
	uint32_t fence = gpu->funcs->last_fence(gpu);

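	/*
	 * last_fence() is the last fence the GPU completed, so fence + 1
	 * identifies the submit that hung.  Mark it as signalled so anyone
	 * waiting on it is released before we attempt recovery.
	 */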
	msm_update_fence(gpu->fctx, fence + 1);

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
	list_for_each_entry(submit, &gpu->submit_list, node) {
		if (submit->fence->seqno == (fence + 1)) {
			struct task_struct *task;

			rcu_read_lock();
			task = pid_task(submit->pid, PIDTYPE_PID);
			if (task) {
				dev_err(dev->dev, "%s: offending task: %s\n",
						gpu->name, task->comm);
			}
			rcu_read_unlock();
			break;
		}
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		inactive_cancel(gpu);
		gpu->funcs->recover(gpu);

		/* replay the remaining submits after the one that hung: */
		list_for_each_entry(submit, &gpu->submit_list, node) {
			gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	if (fence != gpu->hangcheck_fence) {
		/* some progress has been made.. ya! */
		gpu->hangcheck_fence = fence;
	} else if (fence < gpu->fctx->last_fence) {
		/* no progress and not done.. hung! */
		gpu->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
				gpu->name);
		dev_err(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, gpu->fctx->last_fence);
		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (gpu->fctx->last_fence > gpu->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

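/*
 * Starting the counters zeroes activetime/totaltime and takes a baseline
 * reading of the hardware counters, so the first msm_gpu_perfcntr_sample()
 * call returns deltas measured from this point.
 */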
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->id);
		drm_gem_object_unreference(&msm_obj->base);
	}

	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	while (!list_empty(&gpu->submit_list)) {
		struct msm_gem_submit *submit;

		submit = list_first_entry(&gpu->submit_list,
				struct msm_gem_submit, node);

		if (fence_is_signaled(submit->fence)) {
			retire_submit(gpu, submit);
		} else {
			break;
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	msm_update_fence(gpu->fctx, fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);

	if (!msm_gpu_active(gpu))
		inactive_start(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
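/*
 * Each bo in the submit gets an extra reference and its iova pinned here;
 * both are dropped again by retire_submit() once the submit's fence has
 * signalled.
 */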
int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int i, ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	submit->fence = msm_fence_alloc(gpu->fctx);
	if (IS_ERR(submit->fence)) {
		ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		return ret;
	}

	inactive_cancel(gpu);

	list_add_tail(&submit->node, &gpu->submit_list);

	msm_rd_dump_submit(submit);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint32_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_reference(&msm_obj->base);
		msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);

	return 0;
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

static const char *clk_names[] = {
	"src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
	"alt_mem_iface_clk",
};

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, const char *ioname, const char *irqname, int ringsz)
{
	struct iommu_domain *iommu;
	int i, ret;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;
	gpu->inactive = true;
	gpu->fctx = msm_fence_context_alloc(drm, name);
	if (IS_ERR(gpu->fctx)) {
		ret = PTR_ERR(gpu->fctx);
		gpu->fctx = NULL;
		goto fail;
	}

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->inactive_work, inactive_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	INIT_LIST_HEAD(&gpu->submit_list);

	setup_timer(&gpu->inactive_timer, inactive_handler,
			(unsigned long)gpu);
	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	spin_lock_init(&gpu->perf_lock);

	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	/* Acquire clocks: */
	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
		if (IS_ERR(gpu->grp_clks[i]))
			gpu->grp_clks[i] = NULL;
	}

	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
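	/*
	 * Like the clocks above, the regulators are optional: if a supply
	 * cannot be obtained the handle is left NULL and enable_pwrrail() /
	 * disable_pwrrail() simply skip it.
	 */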
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (iommu) {
		dev_info(drm->dev, "%s: using IOMMU\n", name);
		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
		if (IS_ERR(gpu->mmu)) {
			ret = PTR_ERR(gpu->mmu);
			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
			gpu->mmu = NULL;
			iommu_domain_free(iommu);
			goto fail;
		}

	} else {
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	}
	gpu->id = msm_register_mmu(drm, gpu->mmu);


	/* Create ringbuffer: */
	mutex_lock(&drm->struct_mutex);
	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	bs_init(gpu);

	return 0;

fail:
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->mmu)
		gpu->mmu->funcs->destroy(gpu->mmu);

	if (gpu->fctx)
		msm_fence_context_free(gpu->fctx);
}